From 123fcb0917f5884a84dbafdbaa456e8c8d0bce39 Mon Sep 17 00:00:00 2001 From: ebembi-crdb Date: Fri, 9 Jan 2026 18:41:56 +0530 Subject: [PATCH 1/2] Archive v1.0 documentation Following the same process used for v2.1 (PR #21195) and subsequent versions. Removes: - src/current/v1.0/ directory (180 files) - src/current/_includes/v1.0/ directory - src/current/_includes/sidebar-data-v1.0.json - src/current/_includes/releases/v1.0/ (51 files) Keeps: - src/current/releases/v1.0.md with archived notice Total: ~337 files removed --- .../_includes/releases/v1.0/beta-20160407.md | 83 - .../_includes/releases/v1.0/beta-20160414.md | 92 - .../_includes/releases/v1.0/beta-20160421.md | 97 - .../_includes/releases/v1.0/beta-20160428.md | 64 - .../_includes/releases/v1.0/beta-20160505.md | 76 - .../_includes/releases/v1.0/beta-20160512.md | 47 - .../_includes/releases/v1.0/beta-20160519.md | 45 - .../_includes/releases/v1.0/beta-20160526.md | 37 - .../_includes/releases/v1.0/beta-20160602.md | 34 - .../_includes/releases/v1.0/beta-20160609.md | 38 - .../_includes/releases/v1.0/beta-20160616.md | 46 - .../_includes/releases/v1.0/beta-20160629.md | 56 - .../_includes/releases/v1.0/beta-20160714.md | 67 - .../_includes/releases/v1.0/beta-20160721.md | 45 - .../_includes/releases/v1.0/beta-20160728.md | 37 - .../_includes/releases/v1.0/beta-20160829.md | 78 - .../_includes/releases/v1.0/beta-20160908.md | 50 - .../_includes/releases/v1.0/beta-20160915.md | 37 - .../_includes/releases/v1.0/beta-20160929.md | 52 - .../_includes/releases/v1.0/beta-20161006.md | 20 - .../_includes/releases/v1.0/beta-20161013.md | 113 - .../_includes/releases/v1.0/beta-20161027.md | 59 - .../_includes/releases/v1.0/beta-20161103.md | 62 - .../_includes/releases/v1.0/beta-20161110.md | 7 - .../_includes/releases/v1.0/beta-20161201.md | 140 - .../_includes/releases/v1.0/beta-20161208.md | 56 - .../_includes/releases/v1.0/beta-20161215.md | 40 - .../_includes/releases/v1.0/beta-20170105.md | 84 - .../_includes/releases/v1.0/beta-20170112.md | 60 - .../_includes/releases/v1.0/beta-20170126.md | 83 - .../_includes/releases/v1.0/beta-20170209.md | 66 - .../_includes/releases/v1.0/beta-20170216.md | 39 - .../_includes/releases/v1.0/beta-20170223.md | 37 - .../_includes/releases/v1.0/beta-20170309.md | 58 - .../_includes/releases/v1.0/beta-20170323.md | 80 - .../_includes/releases/v1.0/beta-20170330.md | 43 - .../_includes/releases/v1.0/beta-20170406.md | 5 - .../_includes/releases/v1.0/beta-20170413.md | 105 - .../_includes/releases/v1.0/beta-20170420.md | 69 - .../_includes/releases/v1.0/v1.0-rc.1.md | 111 - .../_includes/releases/v1.0/v1.0-rc.2.md | 65 - src/current/_includes/releases/v1.0/v1.0.1.md | 39 - src/current/_includes/releases/v1.0/v1.0.2.md | 45 - src/current/_includes/releases/v1.0/v1.0.3.md | 33 - src/current/_includes/releases/v1.0/v1.0.4.md | 21 - src/current/_includes/releases/v1.0/v1.0.5.md | 24 - src/current/_includes/releases/v1.0/v1.0.6.md | 19 - src/current/_includes/releases/v1.0/v1.0.7.md | 8 - src/current/_includes/releases/v1.0/v1.0.md | 20 - src/current/_includes/sidebar-data-v1.0.json | 1102 -- .../_includes/v1.0/app/BasicSample.java | 34 - src/current/_includes/v1.0/app/TxnSample.java | 113 - .../v1.0/app/activerecord-basic-sample.rb | 45 - src/current/_includes/v1.0/app/basic-sample.c | 0 .../_includes/v1.0/app/basic-sample.clj | 31 - .../_includes/v1.0/app/basic-sample.cpp | 41 - .../_includes/v1.0/app/basic-sample.go | 44 - .../_includes/v1.0/app/basic-sample.js | 55 - 
.../_includes/v1.0/app/basic-sample.php | 20 - .../_includes/v1.0/app/basic-sample.py | 28 - .../_includes/v1.0/app/basic-sample.rb | 22 - .../_includes/v1.0/app/basic-sample.rs | 22 - .../_includes/v1.0/app/common-steps.md | 36 - .../_includes/v1.0/app/gorm-basic-sample.go | 41 - .../app/hibernate-basic-sample/Sample.java | 64 - .../app/hibernate-basic-sample/build.gradle | 16 - .../hibernate-basic-sample.tgz | Bin 1613 -> 0 bytes .../hibernate-basic-sample/hibernate.cfg.xml | 18 - src/current/_includes/v1.0/app/project.clj | 7 - .../v1.0/app/sequelize-basic-sample.js | 35 - .../v1.0/app/sqlalchemy-basic-sample.py | 32 - src/current/_includes/v1.0/app/txn-sample.clj | 43 - src/current/_includes/v1.0/app/txn-sample.cpp | 76 - src/current/_includes/v1.0/app/txn-sample.go | 51 - src/current/_includes/v1.0/app/txn-sample.js | 141 - src/current/_includes/v1.0/app/txn-sample.php | 71 - src/current/_includes/v1.0/app/txn-sample.py | 68 - src/current/_includes/v1.0/app/txn-sample.rb | 43 - src/current/_includes/v1.0/app/txn-sample.rs | 59 - src/current/_includes/v1.0/app/util.clj | 38 - .../v1.0/faq/auto-generate-unique-ids.html | 15 - .../v1.0/faq/simulate-key-value-store.html | 13 - .../v1.0/faq/when-to-interleave-tables.html | 5 - .../v1.0/misc/diagnostics-callout.html | 1 - .../v1.0/misc/experimental-warning.md | 3 - .../_includes/v1.0/misc/external-urls.md | 19 - .../_includes/v1.0/misc/logging-flags.md | 8 - .../v1.0/misc/prometheus-callout.html | 1 - .../v1.0/misc/remove-user-callout.html | 1 - .../v1.0/sql/diagrams/add_column.html | 58 - .../v1.0/sql/diagrams/add_constraint.html | 41 - .../v1.0/sql/diagrams/alter_column.html | 59 - .../v1.0/sql/diagrams/alter_view.html | 36 - .../_includes/v1.0/sql/diagrams/backup.html | 65 - .../v1.0/sql/diagrams/begin_transaction.html | 62 - .../v1.0/sql/diagrams/check_column_level.html | 70 - .../v1.0/sql/diagrams/check_table_level.html | 60 - .../v1.0/sql/diagrams/col_qual_list.html | 110 - .../v1.0/sql/diagrams/column_def.html | 23 - .../v1.0/sql/diagrams/commit_transaction.html | 17 - .../v1.0/sql/diagrams/create_database.html | 61 - .../v1.0/sql/diagrams/create_index.html | 84 - .../v1.0/sql/diagrams/create_table.html | 62 - .../v1.0/sql/diagrams/create_table_as.html | 50 - .../v1.0/sql/diagrams/create_user.html | 30 - .../v1.0/sql/diagrams/create_view.html | 38 - .../diagrams/default_value_column_level.html | 64 - .../_includes/v1.0/sql/diagrams/delete.html | 63 - .../v1.0/sql/diagrams/drop_column.html | 48 - .../v1.0/sql/diagrams/drop_constraint.html | 42 - .../v1.0/sql/diagrams/drop_database.html | 25 - .../v1.0/sql/diagrams/drop_index.html | 42 - .../v1.0/sql/diagrams/drop_table.html | 34 - .../v1.0/sql/diagrams/drop_view.html | 34 - .../_includes/v1.0/sql/diagrams/explain.html | 40 - .../v1.0/sql/diagrams/family_def.html | 30 - .../diagrams/foreign_key_column_level.html | 75 - .../sql/diagrams/foreign_key_table_level.html | 85 - .../_includes/v1.0/sql/diagrams/grammar.html | 9043 ----------------- .../_includes/v1.0/sql/diagrams/grant.html | 74 - .../v1.0/sql/diagrams/index_def.html | 55 - .../_includes/v1.0/sql/diagrams/insert.html | 65 - .../v1.0/sql/diagrams/interleave.html | 64 - .../sql/diagrams/not_null_column_level.html | 59 - .../v1.0/sql/diagrams/opt_interleave.html | 33 - .../diagrams/primary_key_column_level.html | 59 - .../sql/diagrams/primary_key_table_level.html | 63 - .../v1.0/sql/diagrams/release_savepoint.html | 19 - .../v1.0/sql/diagrams/rename_column.html | 44 - .../v1.0/sql/diagrams/rename_database.html | 30 - 
.../v1.0/sql/diagrams/rename_index.html | 44 - .../v1.0/sql/diagrams/rename_table.html | 36 - .../_includes/v1.0/sql/diagrams/restore.html | 52 - .../_includes/v1.0/sql/diagrams/revoke.html | 74 - .../sql/diagrams/rollback_transaction.html | 22 - .../v1.0/sql/diagrams/savepoint.html | 19 - .../_includes/v1.0/sql/diagrams/select.html | 120 - .../sql/diagrams/set_cluster_setting.html | 39 - .../v1.0/sql/diagrams/set_transaction.html | 62 - .../_includes/v1.0/sql/diagrams/set_var.html | 53 - .../sql/diagrams/show_cluster_setting.html | 31 - .../v1.0/sql/diagrams/show_columns.html | 22 - .../v1.0/sql/diagrams/show_constraints.html | 22 - .../v1.0/sql/diagrams/show_create_table.html | 22 - .../v1.0/sql/diagrams/show_create_view.html | 22 - .../v1.0/sql/diagrams/show_databases.html | 14 - .../v1.0/sql/diagrams/show_grants.html | 50 - .../v1.0/sql/diagrams/show_index.html | 22 - .../v1.0/sql/diagrams/show_tables.html | 22 - .../v1.0/sql/diagrams/show_users.html | 14 - .../_includes/v1.0/sql/diagrams/show_var.html | 17 - .../_includes/v1.0/sql/diagrams/table.html | 0 .../v1.0/sql/diagrams/table_constraint.html | 110 - .../_includes/v1.0/sql/diagrams/truncate.html | 28 - .../sql/diagrams/unique_column_level.html | 59 - .../v1.0/sql/diagrams/unique_table_level.html | 63 - .../_includes/v1.0/sql/diagrams/update.html | 101 - .../_includes/v1.0/sql/diagrams/upsert.html | 60 - .../v1.0/start-in-docker/mac-linux-steps.md | 160 - src/current/openssl_fix.rb | 27 + src/current/releases/v1.0.md | 7 +- src/current/v1.0/404.md | 19 - src/current/v1.0/add-column.md | 128 - src/current/v1.0/add-constraint.md | 119 - src/current/v1.0/alter-column.md | 61 - src/current/v1.0/alter-table.md | 22 - src/current/v1.0/alter-view.md | 71 - src/current/v1.0/as-of-system-time.md | 44 - .../v1.0/automated-scaling-and-repair.md | 17 - src/current/v1.0/back-up-data.md | 26 - src/current/v1.0/backup.md | 140 - src/current/v1.0/begin-transaction.md | 119 - src/current/v1.0/bool.md | 74 - .../v1.0/build-a-c++-app-with-cockroachdb.md | 75 - .../build-a-clojure-app-with-cockroachdb.md | 114 - .../build-a-go-app-with-cockroachdb-gorm.md | 100 - .../v1.0/build-a-go-app-with-cockroachdb.md | 128 - ...d-a-java-app-with-cockroachdb-hibernate.md | 113 - .../v1.0/build-a-java-app-with-cockroachdb.md | 80 - ...a-nodejs-app-with-cockroachdb-sequelize.md | 99 - .../build-a-nodejs-app-with-cockroachdb.md | 122 - .../v1.0/build-a-php-app-with-cockroachdb.md | 87 - ...-python-app-with-cockroachdb-sqlalchemy.md | 110 - .../build-a-python-app-with-cockroachdb.md | 117 - ...-ruby-app-with-cockroachdb-activerecord.md | 101 - .../v1.0/build-a-ruby-app-with-cockroachdb.md | 107 - .../v1.0/build-a-rust-app-with-cockroachdb.md | 87 - .../v1.0/build-an-app-with-cockroachdb.md | 23 - src/current/v1.0/bytes.md | 71 - src/current/v1.0/check.md | 109 - src/current/v1.0/cloud-deployment.md | 31 - src/current/v1.0/cluster-settings.md | 73 - .../v1.0/cluster-setup-troubleshooting.md | 194 - src/current/v1.0/cockroach-commands.md | 41 - src/current/v1.0/cockroachdb-architecture.md | 8 - src/current/v1.0/cockroachdb-in-comparison.md | 260 - src/current/v1.0/collate.md | 124 - src/current/v1.0/column-families.md | 89 - src/current/v1.0/commit-transaction.md | 66 - src/current/v1.0/common-errors.md | 76 - .../v1.0/configure-replication-zones.md | 588 -- src/current/v1.0/constraints.md | 115 - src/current/v1.0/create-and-manage-users.md | 184 - src/current/v1.0/create-database.md | 99 - src/current/v1.0/create-index.md | 128 - 
.../v1.0/create-security-certificates.md | 272 - src/current/v1.0/create-table-as.md | 216 - src/current/v1.0/create-table.md | 318 - src/current/v1.0/create-user.md | 105 - src/current/v1.0/create-view.md | 105 - src/current/v1.0/data-types.md | 45 - src/current/v1.0/date.md | 79 - src/current/v1.0/debug-and-error-logs.md | 148 - src/current/v1.0/debug-zip.md | 97 - src/current/v1.0/decimal.md | 82 - src/current/v1.0/default-value.md | 69 - src/current/v1.0/delete.md | 185 - .../v1.0/demo-automatic-cloud-migration.md | 228 - .../v1.0/demo-automatic-rebalancing.md | 177 - src/current/v1.0/demo-data-replication.md | 230 - .../v1.0/demo-fault-tolerance-and-recovery.md | 346 - .../deploy-cockroachdb-on-aws-insecure.md | 293 - src/current/v1.0/deploy-cockroachdb-on-aws.md | 451 - ...y-cockroachdb-on-digital-ocean-insecure.md | 277 - .../deploy-cockroachdb-on-digital-ocean.md | 430 - ...achdb-on-google-cloud-platform-insecure.md | 291 - ...oy-cockroachdb-on-google-cloud-platform.md | 447 - ...cockroachdb-on-microsoft-azure-insecure.md | 306 - .../deploy-cockroachdb-on-microsoft-azure.md | 452 - src/current/v1.0/diagnostics-reporting.md | 240 - src/current/v1.0/distributed-transactions.md | 18 - src/current/v1.0/drop-column.md | 76 - src/current/v1.0/drop-constraint.md | 63 - src/current/v1.0/drop-database.md | 72 - src/current/v1.0/drop-index.md | 103 - src/current/v1.0/drop-table.md | 129 - src/current/v1.0/drop-view.md | 122 - src/current/v1.0/explain.md | 294 - src/current/v1.0/explore-the-admin-ui.md | 21 - src/current/v1.0/file-an-issue.md | 65 - src/current/v1.0/float.md | 83 - src/current/v1.0/foreign-key.md | 166 - .../v1.0/frequently-asked-questions.md | 171 - src/current/v1.0/functions-and-operators.md | 68 - .../v1.0/generate-cockroachdb-resources.md | 282 - src/current/v1.0/go-implementation.md | 21 - src/current/v1.0/grant.md | 116 - src/current/v1.0/high-availability.md | 21 - src/current/v1.0/import-data.md | 116 - src/current/v1.0/improve-the-docs.md | 20 - src/current/v1.0/index.md | 32 - src/current/v1.0/indexes.md | 126 - src/current/v1.0/information-schema.md | 177 - src/current/v1.0/insert.md | 500 - src/current/v1.0/install-client-drivers.md | 22 - src/current/v1.0/install-cockroachdb.html | 508 - src/current/v1.0/int.md | 105 - src/current/v1.0/interleave-in-parent.md | 166 - .../internal/version-switcher-page-data.json | 17 - src/current/v1.0/interval.md | 101 - src/current/v1.0/keywords-and-identifiers.md | 46 - src/current/v1.0/known-limitations.md | 483 - src/current/v1.0/learn-cockroachdb-sql.md | 413 - .../v1.0/manual-deployment-insecure.md | 306 - src/current/v1.0/manual-deployment.md | 458 - .../monitor-cockroachdb-with-prometheus.md | 168 - src/current/v1.0/multi-active-availability.md | 66 - src/current/v1.0/not-null.md | 72 - src/current/v1.0/null-handling.md | 355 - src/current/v1.0/open-source.md | 14 - src/current/v1.0/operational-faqs.md | 56 - ...-cockroachdb-with-docker-swarm-insecure.md | 347 - ...chestrate-cockroachdb-with-docker-swarm.md | 586 -- ...orchestrate-cockroachdb-with-kubernetes.md | 366 - src/current/v1.0/orchestration.md | 20 - src/current/v1.0/porting-postgres.md | 97 - src/current/v1.0/primary-key.md | 116 - src/current/v1.0/privileges.md | 45 - .../v1.0/query-behavior-troubleshooting.md | 43 - .../v1.0/recommended-production-settings.md | 297 - src/current/v1.0/release-savepoint.md | 52 - src/current/v1.0/rename-column.md | 63 - src/current/v1.0/rename-database.md | 88 - src/current/v1.0/rename-index.md | 72 - 
src/current/v1.0/rename-table.md | 131 - src/current/v1.0/restore-data.md | 18 - src/current/v1.0/restore.md | 174 - src/current/v1.0/revoke.md | 152 - src/current/v1.0/rollback-transaction.md | 75 - src/current/v1.0/savepoint.md | 48 - src/current/v1.0/secure-a-cluster.md | 300 - src/current/v1.0/select.md | 629 -- src/current/v1.0/serial.md | 122 - src/current/v1.0/set-cluster-setting.md | 71 - src/current/v1.0/set-transaction.md | 92 - src/current/v1.0/set-vars.md | 162 - src/current/v1.0/show-cluster-setting.md | 89 - src/current/v1.0/show-columns.md | 71 - src/current/v1.0/show-constraints.md | 77 - src/current/v1.0/show-create-table.md | 71 - src/current/v1.0/show-create-view.md | 74 - src/current/v1.0/show-databases.md | 40 - src/current/v1.0/show-grants.md | 160 - src/current/v1.0/show-index.md | 80 - src/current/v1.0/show-tables.md | 68 - src/current/v1.0/show-users.md | 37 - src/current/v1.0/show-vars.md | 115 - src/current/v1.0/simplified-deployment.md | 15 - src/current/v1.0/sql-constants.md | 235 - src/current/v1.0/sql-dump.md | 349 - src/current/v1.0/sql-expressions.md | 716 -- src/current/v1.0/sql-faqs.md | 121 - src/current/v1.0/sql-feature-support.md | 166 - src/current/v1.0/sql-grammar.md | 41 - src/current/v1.0/sql-name-resolution.md | 47 - src/current/v1.0/sql-statements.md | 101 - src/current/v1.0/sql.md | 15 - .../v1.0/start-a-local-cluster-in-docker.md | 270 - src/current/v1.0/start-a-local-cluster.md | 265 - src/current/v1.0/start-a-node.md | 164 - src/current/v1.0/stop-a-node.md | 117 - src/current/v1.0/string.md | 104 - src/current/v1.0/strong-consistency.md | 47 - src/current/v1.0/support-resources.md | 16 - src/current/v1.0/table-expressions.md | 211 - src/current/v1.0/timestamp.md | 116 - src/current/v1.0/transactions.md | 259 - src/current/v1.0/troubleshoot.md | 104 - src/current/v1.0/troubleshooting-overview.md | 18 - src/current/v1.0/truncate.md | 133 - src/current/v1.0/unique.md | 116 - src/current/v1.0/update.md | 409 - src/current/v1.0/upgrade-cockroach-version.md | 162 - src/current/v1.0/upsert.md | 156 - .../v1.0/use-the-built-in-sql-client.md | 363 - src/current/v1.0/view-node-details.md | 124 - src/current/v1.0/view-version-details.md | 41 - src/current/v1.0/views.md | 355 - src/current/v1.0/window-functions.md | 9 - 339 files changed, 33 insertions(+), 44710 deletions(-) delete mode 100644 src/current/_includes/releases/v1.0/beta-20160407.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160414.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160421.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160428.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160505.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160512.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160519.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160526.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160602.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160609.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160616.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160629.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160714.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160721.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160728.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160829.md delete mode 100644 
src/current/_includes/releases/v1.0/beta-20160908.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160915.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20160929.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20161006.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20161013.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20161027.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20161103.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20161110.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20161201.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20161208.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20161215.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170105.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170112.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170126.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170209.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170216.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170223.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170309.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170323.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170330.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170406.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170413.md delete mode 100644 src/current/_includes/releases/v1.0/beta-20170420.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0-rc.1.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0-rc.2.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0.1.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0.2.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0.3.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0.4.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0.5.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0.6.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0.7.md delete mode 100644 src/current/_includes/releases/v1.0/v1.0.md delete mode 100644 src/current/_includes/sidebar-data-v1.0.json delete mode 100644 src/current/_includes/v1.0/app/BasicSample.java delete mode 100644 src/current/_includes/v1.0/app/TxnSample.java delete mode 100644 src/current/_includes/v1.0/app/activerecord-basic-sample.rb delete mode 100644 src/current/_includes/v1.0/app/basic-sample.c delete mode 100644 src/current/_includes/v1.0/app/basic-sample.clj delete mode 100644 src/current/_includes/v1.0/app/basic-sample.cpp delete mode 100644 src/current/_includes/v1.0/app/basic-sample.go delete mode 100644 src/current/_includes/v1.0/app/basic-sample.js delete mode 100644 src/current/_includes/v1.0/app/basic-sample.php delete mode 100644 src/current/_includes/v1.0/app/basic-sample.py delete mode 100644 src/current/_includes/v1.0/app/basic-sample.rb delete mode 100644 src/current/_includes/v1.0/app/basic-sample.rs delete mode 100644 src/current/_includes/v1.0/app/common-steps.md delete mode 100644 src/current/_includes/v1.0/app/gorm-basic-sample.go delete mode 100644 src/current/_includes/v1.0/app/hibernate-basic-sample/Sample.java delete mode 100644 src/current/_includes/v1.0/app/hibernate-basic-sample/build.gradle delete mode 100644 
src/current/_includes/v1.0/app/hibernate-basic-sample/hibernate-basic-sample.tgz delete mode 100644 src/current/_includes/v1.0/app/hibernate-basic-sample/hibernate.cfg.xml delete mode 100644 src/current/_includes/v1.0/app/project.clj delete mode 100644 src/current/_includes/v1.0/app/sequelize-basic-sample.js delete mode 100644 src/current/_includes/v1.0/app/sqlalchemy-basic-sample.py delete mode 100644 src/current/_includes/v1.0/app/txn-sample.clj delete mode 100644 src/current/_includes/v1.0/app/txn-sample.cpp delete mode 100644 src/current/_includes/v1.0/app/txn-sample.go delete mode 100644 src/current/_includes/v1.0/app/txn-sample.js delete mode 100644 src/current/_includes/v1.0/app/txn-sample.php delete mode 100644 src/current/_includes/v1.0/app/txn-sample.py delete mode 100644 src/current/_includes/v1.0/app/txn-sample.rb delete mode 100644 src/current/_includes/v1.0/app/txn-sample.rs delete mode 100644 src/current/_includes/v1.0/app/util.clj delete mode 100644 src/current/_includes/v1.0/faq/auto-generate-unique-ids.html delete mode 100644 src/current/_includes/v1.0/faq/simulate-key-value-store.html delete mode 100644 src/current/_includes/v1.0/faq/when-to-interleave-tables.html delete mode 100644 src/current/_includes/v1.0/misc/diagnostics-callout.html delete mode 100644 src/current/_includes/v1.0/misc/experimental-warning.md delete mode 100644 src/current/_includes/v1.0/misc/external-urls.md delete mode 100644 src/current/_includes/v1.0/misc/logging-flags.md delete mode 100644 src/current/_includes/v1.0/misc/prometheus-callout.html delete mode 100644 src/current/_includes/v1.0/misc/remove-user-callout.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/add_column.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/add_constraint.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/alter_column.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/alter_view.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/backup.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/begin_transaction.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/check_column_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/check_table_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/col_qual_list.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/column_def.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/commit_transaction.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/create_database.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/create_index.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/create_table.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/create_table_as.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/create_user.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/create_view.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/default_value_column_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/delete.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/drop_column.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/drop_constraint.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/drop_database.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/drop_index.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/drop_table.html delete mode 100644 
src/current/_includes/v1.0/sql/diagrams/drop_view.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/explain.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/family_def.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/foreign_key_column_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/foreign_key_table_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/grammar.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/grant.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/index_def.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/insert.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/interleave.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/not_null_column_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/opt_interleave.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/primary_key_column_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/primary_key_table_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/release_savepoint.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/rename_column.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/rename_database.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/rename_index.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/rename_table.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/restore.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/revoke.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/rollback_transaction.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/savepoint.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/select.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/set_cluster_setting.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/set_transaction.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/set_var.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_cluster_setting.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_columns.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_constraints.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_create_table.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_create_view.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_databases.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_grants.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_index.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_tables.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_users.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/show_var.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/table.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/table_constraint.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/truncate.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/unique_column_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/unique_table_level.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/update.html delete mode 100644 src/current/_includes/v1.0/sql/diagrams/upsert.html delete mode 100644 
src/current/_includes/v1.0/start-in-docker/mac-linux-steps.md create mode 100644 src/current/openssl_fix.rb delete mode 100755 src/current/v1.0/404.md delete mode 100644 src/current/v1.0/add-column.md delete mode 100644 src/current/v1.0/add-constraint.md delete mode 100644 src/current/v1.0/alter-column.md delete mode 100644 src/current/v1.0/alter-table.md delete mode 100644 src/current/v1.0/alter-view.md delete mode 100644 src/current/v1.0/as-of-system-time.md delete mode 100644 src/current/v1.0/automated-scaling-and-repair.md delete mode 100644 src/current/v1.0/back-up-data.md delete mode 100644 src/current/v1.0/backup.md delete mode 100644 src/current/v1.0/begin-transaction.md delete mode 100644 src/current/v1.0/bool.md delete mode 100644 src/current/v1.0/build-a-c++-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-a-clojure-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-a-go-app-with-cockroachdb-gorm.md delete mode 100644 src/current/v1.0/build-a-go-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-a-java-app-with-cockroachdb-hibernate.md delete mode 100644 src/current/v1.0/build-a-java-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-a-nodejs-app-with-cockroachdb-sequelize.md delete mode 100644 src/current/v1.0/build-a-nodejs-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-a-php-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-a-python-app-with-cockroachdb-sqlalchemy.md delete mode 100644 src/current/v1.0/build-a-python-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-a-ruby-app-with-cockroachdb-activerecord.md delete mode 100644 src/current/v1.0/build-a-ruby-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-a-rust-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/build-an-app-with-cockroachdb.md delete mode 100644 src/current/v1.0/bytes.md delete mode 100644 src/current/v1.0/check.md delete mode 100644 src/current/v1.0/cloud-deployment.md delete mode 100644 src/current/v1.0/cluster-settings.md delete mode 100644 src/current/v1.0/cluster-setup-troubleshooting.md delete mode 100644 src/current/v1.0/cockroach-commands.md delete mode 100644 src/current/v1.0/cockroachdb-architecture.md delete mode 100644 src/current/v1.0/cockroachdb-in-comparison.md delete mode 100644 src/current/v1.0/collate.md delete mode 100644 src/current/v1.0/column-families.md delete mode 100644 src/current/v1.0/commit-transaction.md delete mode 100644 src/current/v1.0/common-errors.md delete mode 100644 src/current/v1.0/configure-replication-zones.md delete mode 100644 src/current/v1.0/constraints.md delete mode 100644 src/current/v1.0/create-and-manage-users.md delete mode 100644 src/current/v1.0/create-database.md delete mode 100644 src/current/v1.0/create-index.md delete mode 100644 src/current/v1.0/create-security-certificates.md delete mode 100644 src/current/v1.0/create-table-as.md delete mode 100644 src/current/v1.0/create-table.md delete mode 100644 src/current/v1.0/create-user.md delete mode 100644 src/current/v1.0/create-view.md delete mode 100644 src/current/v1.0/data-types.md delete mode 100644 src/current/v1.0/date.md delete mode 100644 src/current/v1.0/debug-and-error-logs.md delete mode 100644 src/current/v1.0/debug-zip.md delete mode 100644 src/current/v1.0/decimal.md delete mode 100644 src/current/v1.0/default-value.md delete mode 100644 src/current/v1.0/delete.md delete mode 100644 src/current/v1.0/demo-automatic-cloud-migration.md delete mode 100644 
src/current/v1.0/demo-automatic-rebalancing.md delete mode 100644 src/current/v1.0/demo-data-replication.md delete mode 100644 src/current/v1.0/demo-fault-tolerance-and-recovery.md delete mode 100644 src/current/v1.0/deploy-cockroachdb-on-aws-insecure.md delete mode 100644 src/current/v1.0/deploy-cockroachdb-on-aws.md delete mode 100644 src/current/v1.0/deploy-cockroachdb-on-digital-ocean-insecure.md delete mode 100644 src/current/v1.0/deploy-cockroachdb-on-digital-ocean.md delete mode 100644 src/current/v1.0/deploy-cockroachdb-on-google-cloud-platform-insecure.md delete mode 100644 src/current/v1.0/deploy-cockroachdb-on-google-cloud-platform.md delete mode 100644 src/current/v1.0/deploy-cockroachdb-on-microsoft-azure-insecure.md delete mode 100644 src/current/v1.0/deploy-cockroachdb-on-microsoft-azure.md delete mode 100644 src/current/v1.0/diagnostics-reporting.md delete mode 100644 src/current/v1.0/distributed-transactions.md delete mode 100644 src/current/v1.0/drop-column.md delete mode 100644 src/current/v1.0/drop-constraint.md delete mode 100644 src/current/v1.0/drop-database.md delete mode 100644 src/current/v1.0/drop-index.md delete mode 100644 src/current/v1.0/drop-table.md delete mode 100644 src/current/v1.0/drop-view.md delete mode 100644 src/current/v1.0/explain.md delete mode 100644 src/current/v1.0/explore-the-admin-ui.md delete mode 100644 src/current/v1.0/file-an-issue.md delete mode 100644 src/current/v1.0/float.md delete mode 100644 src/current/v1.0/foreign-key.md delete mode 100644 src/current/v1.0/frequently-asked-questions.md delete mode 100644 src/current/v1.0/functions-and-operators.md delete mode 100644 src/current/v1.0/generate-cockroachdb-resources.md delete mode 100644 src/current/v1.0/go-implementation.md delete mode 100644 src/current/v1.0/grant.md delete mode 100644 src/current/v1.0/high-availability.md delete mode 100644 src/current/v1.0/import-data.md delete mode 100644 src/current/v1.0/improve-the-docs.md delete mode 100755 src/current/v1.0/index.md delete mode 100644 src/current/v1.0/indexes.md delete mode 100644 src/current/v1.0/information-schema.md delete mode 100644 src/current/v1.0/insert.md delete mode 100644 src/current/v1.0/install-client-drivers.md delete mode 100644 src/current/v1.0/install-cockroachdb.html delete mode 100644 src/current/v1.0/int.md delete mode 100644 src/current/v1.0/interleave-in-parent.md delete mode 100644 src/current/v1.0/internal/version-switcher-page-data.json delete mode 100644 src/current/v1.0/interval.md delete mode 100644 src/current/v1.0/keywords-and-identifiers.md delete mode 100644 src/current/v1.0/known-limitations.md delete mode 100644 src/current/v1.0/learn-cockroachdb-sql.md delete mode 100644 src/current/v1.0/manual-deployment-insecure.md delete mode 100644 src/current/v1.0/manual-deployment.md delete mode 100644 src/current/v1.0/monitor-cockroachdb-with-prometheus.md delete mode 100644 src/current/v1.0/multi-active-availability.md delete mode 100644 src/current/v1.0/not-null.md delete mode 100644 src/current/v1.0/null-handling.md delete mode 100644 src/current/v1.0/open-source.md delete mode 100644 src/current/v1.0/operational-faqs.md delete mode 100644 src/current/v1.0/orchestrate-cockroachdb-with-docker-swarm-insecure.md delete mode 100644 src/current/v1.0/orchestrate-cockroachdb-with-docker-swarm.md delete mode 100644 src/current/v1.0/orchestrate-cockroachdb-with-kubernetes.md delete mode 100644 src/current/v1.0/orchestration.md delete mode 100644 src/current/v1.0/porting-postgres.md delete mode 100644 
src/current/v1.0/primary-key.md delete mode 100644 src/current/v1.0/privileges.md delete mode 100644 src/current/v1.0/query-behavior-troubleshooting.md delete mode 100644 src/current/v1.0/recommended-production-settings.md delete mode 100644 src/current/v1.0/release-savepoint.md delete mode 100644 src/current/v1.0/rename-column.md delete mode 100644 src/current/v1.0/rename-database.md delete mode 100644 src/current/v1.0/rename-index.md delete mode 100644 src/current/v1.0/rename-table.md delete mode 100644 src/current/v1.0/restore-data.md delete mode 100644 src/current/v1.0/restore.md delete mode 100644 src/current/v1.0/revoke.md delete mode 100644 src/current/v1.0/rollback-transaction.md delete mode 100644 src/current/v1.0/savepoint.md delete mode 100644 src/current/v1.0/secure-a-cluster.md delete mode 100644 src/current/v1.0/select.md delete mode 100644 src/current/v1.0/serial.md delete mode 100644 src/current/v1.0/set-cluster-setting.md delete mode 100644 src/current/v1.0/set-transaction.md delete mode 100644 src/current/v1.0/set-vars.md delete mode 100644 src/current/v1.0/show-cluster-setting.md delete mode 100644 src/current/v1.0/show-columns.md delete mode 100644 src/current/v1.0/show-constraints.md delete mode 100644 src/current/v1.0/show-create-table.md delete mode 100644 src/current/v1.0/show-create-view.md delete mode 100644 src/current/v1.0/show-databases.md delete mode 100644 src/current/v1.0/show-grants.md delete mode 100644 src/current/v1.0/show-index.md delete mode 100644 src/current/v1.0/show-tables.md delete mode 100644 src/current/v1.0/show-users.md delete mode 100644 src/current/v1.0/show-vars.md delete mode 100644 src/current/v1.0/simplified-deployment.md delete mode 100644 src/current/v1.0/sql-constants.md delete mode 100644 src/current/v1.0/sql-dump.md delete mode 100644 src/current/v1.0/sql-expressions.md delete mode 100644 src/current/v1.0/sql-faqs.md delete mode 100644 src/current/v1.0/sql-feature-support.md delete mode 100644 src/current/v1.0/sql-grammar.md delete mode 100644 src/current/v1.0/sql-name-resolution.md delete mode 100644 src/current/v1.0/sql-statements.md delete mode 100644 src/current/v1.0/sql.md delete mode 100644 src/current/v1.0/start-a-local-cluster-in-docker.md delete mode 100644 src/current/v1.0/start-a-local-cluster.md delete mode 100644 src/current/v1.0/start-a-node.md delete mode 100644 src/current/v1.0/stop-a-node.md delete mode 100644 src/current/v1.0/string.md delete mode 100644 src/current/v1.0/strong-consistency.md delete mode 100644 src/current/v1.0/support-resources.md delete mode 100644 src/current/v1.0/table-expressions.md delete mode 100644 src/current/v1.0/timestamp.md delete mode 100644 src/current/v1.0/transactions.md delete mode 100644 src/current/v1.0/troubleshoot.md delete mode 100644 src/current/v1.0/troubleshooting-overview.md delete mode 100644 src/current/v1.0/truncate.md delete mode 100644 src/current/v1.0/unique.md delete mode 100644 src/current/v1.0/update.md delete mode 100644 src/current/v1.0/upgrade-cockroach-version.md delete mode 100644 src/current/v1.0/upsert.md delete mode 100644 src/current/v1.0/use-the-built-in-sql-client.md delete mode 100644 src/current/v1.0/view-node-details.md delete mode 100644 src/current/v1.0/view-version-details.md delete mode 100644 src/current/v1.0/views.md delete mode 100644 src/current/v1.0/window-functions.md diff --git a/src/current/_includes/releases/v1.0/beta-20160407.md b/src/current/_includes/releases/v1.0/beta-20160407.md deleted file mode 100644 index 
33ef0e87d23..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20160407.md
+++ /dev/null
@@ -1,83 +0,0 @@

## {{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

### Backwards-incompatible Changes

* Any databases using the [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) type will need to be deleted and created from scratch. Columns of this type were encoded incorrectly by `beta-20160330`. [#5820](https://github.com/cockroachdb/cockroach/pull/5820)
* [`SELECT`](https://www.cockroachlabs.com/docs/v1.0/select) statements must now specify one or more columns or expressions. Previously, statements such as `SELECT FROM t` were allowed but the results would confuse many clients. [#5859](https://github.com/cockroachdb/cockroach/pull/5859)
* It is now an error to insert a value that is too large into a column with a type of limited length such as [`VARCHAR(n)`](https://www.cockroachlabs.com/docs/v1.0/string). [#5750](https://github.com/cockroachdb/cockroach/pull/5750)
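
The length check described in the last bullet can be illustrated with a minimal SQL sketch; the `users` table and its columns are hypothetical names chosen for the example, not taken from the release notes.

```sql
-- Hypothetical table; the column accepts at most 10 characters.
CREATE TABLE users (
    id INT PRIMARY KEY,
    nickname VARCHAR(10)
);

-- Fits within the declared length, so it is accepted.
INSERT INTO users (id, nickname) VALUES (1, 'ada');

-- Exceeds the declared length; as of this release it is rejected
-- with an error rather than silently accepted.
INSERT INTO users (id, nickname) VALUES (2, 'a-very-long-nickname');
```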

### Compatibility

* Added support for the "flush" message in the PostgreSQL network protocol, which improves compatibility with the [node.js driver](https://www.cockroachlabs.com/docs/v1.0/install-client-drivers). [#5740](https://github.com/cockroachdb/cockroach/pull/5740)
* Fixed a panic when handling certain queries sent by the PHP PDO library. [#5783](https://github.com/cockroachdb/cockroach/pull/5783)
* Improved parsing of timestamps for compatibility with the Go `lib/pq` client library. [#5877](https://github.com/cockroachdb/cockroach/pull/5877)

### New Features

* Index hints: `SELECT FROM tbl@idx` or `SELECT FROM tbl@{FORCE_INDEX=idx}` instructs the query planner to use the given index. [#5785](https://github.com/cockroachdb/cockroach/pull/5785) and [#5806](https://github.com/cockroachdb/cockroach/pull/5806)
* The `cockroach` command-line client now accepts environment variables as default values for many command-line flags. See `--help` for details. [#5430](https://github.com/cockroachdb/cockroach/pull/5430)
* Added SQL `version()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators). [#5763](https://github.com/cockroachdb/cockroach/pull/5763)
* Added compiler and platform information to `cockroach version` output. [#5766](https://github.com/cockroachdb/cockroach/pull/5766)
* Debugging tools show fractional seconds in timestamps. [#5736](https://github.com/cockroachdb/cockroach/pull/5736)
* In secure mode, plain HTTP requests to the [`http-port`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) used by the admin UI are now redirected to HTTPS. [#5746](https://github.com/cockroachdb/cockroach/pull/5746)
* Links to debugging pages are available on the HTTP port at `/debug/`. [#5795](https://github.com/cockroachdb/cockroach/pull/5795)
* The [`cockroach zone set`](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) command now echoes the new configuration. [#5829](https://github.com/cockroachdb/cockroach/pull/5829)
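
A minimal sketch of the index hint syntax from the first bullet above, assuming a hypothetical `orders` table with a secondary index named `orders_by_customer`.

```sql
-- Hypothetical table with a secondary index on customer_id.
CREATE TABLE orders (
    id INT PRIMARY KEY,
    customer_id INT,
    total DECIMAL,
    INDEX orders_by_customer (customer_id)
);

-- Short form: force the planner to read from the named index.
SELECT * FROM orders@orders_by_customer WHERE customer_id = 42;

-- Long form of the same hint.
SELECT * FROM orders@{FORCE_INDEX=orders_by_customer} WHERE customer_id = 42;
```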

### Bug Fixes

* Improved flow control for raft snapshots, fixing an issue that could lead to nodes running out of memory. [#5721](https://github.com/cockroachdb/cockroach/pull/5721)
* Fixed a deadlock scenario in which two conflicting transactions would both be unable to make progress. [#5710](https://github.com/cockroachdb/cockroach/pull/5710)
* Fixed a panic while executing certain [`DELETE`](https://www.cockroachlabs.com/docs/v1.0/delete) statements. [#5840](https://github.com/cockroachdb/cockroach/pull/5840)

### Internal Changes

* Clock offsets are now measured continuously. [#5512](https://github.com/cockroachdb/cockroach/pull/5512)
* Improved caching to avoid redundant range descriptor lookups. [#5627](https://github.com/cockroachdb/cockroach/pull/5627)
* Reduced log spam when nodes are down. [#5883](https://github.com/cockroachdb/cockroach/pull/5883)

### Contributors

This release includes 77 merged PRs by 18 authors. We would like to thank the following contributors from the CockroachDB community, especially [first-time contributor Seif Lotfy](https://github.com/cockroachdb/cockroach/pull/5747).

* Kenji Kaneda
* Seif Lotfy

diff --git a/src/current/_includes/releases/v1.0/beta-20160414.md b/src/current/_includes/releases/v1.0/beta-20160414.md
deleted file mode 100644
index e5d62d7bd13..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20160414.md
+++ /dev/null
@@ -1,92 +0,0 @@

## {{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

### Backwards-incompatible Changes

* Any databases using the [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) type will need to be deleted and created from scratch. Columns of this type were encoded incorrectly in older beta releases. (Again! We apologize for the inconvenience.) [#5994](https://github.com/cockroachdb/cockroach/pull/5994)
* The [SQL function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) `transaction_timestamp_unique` has been removed in favor of the new `cluster_logical_timestamp` function described below.

### New Features

* [`ALTER TABLE`](https://www.cockroachlabs.com/docs/v1.0/alter-table) supports several new operations: `ADD COLUMN` with a default value ([#5759](https://github.com/cockroachdb/cockroach/pull/5759)), `ALTER COLUMN SET DEFAULT`, `ALTER COLUMN DROP DEFAULT`, and `ALTER COLUMN DROP NOT NULL` ([#5947](https://github.com/cockroachdb/cockroach/pull/5947))
* The `TIMESTAMPTZ` type is now accepted as an alias for [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp) for compatibility with PostgreSQL; our `TIMESTAMP` type already supports time zones in the same way as PostgreSQL's `TIMESTAMPTZ`. [#5893](https://github.com/cockroachdb/cockroach/pull/5893)
* The [`STRING`](https://www.cockroachlabs.com/docs/v1.0/string) type now accepts a length parameter (e.g., `STRING(50)`), similar to `CHAR` and `VARCHAR`. [#5918](https://github.com/cockroachdb/cockroach/pull/5918)
* The `LIMIT` clause now accepts placeholders. [#5977](https://github.com/cockroachdb/cockroach/pull/5977)
* The `RETURNING` clause of [`INSERT`](https://www.cockroachlabs.com/docs/v1.0/insert), [`DELETE`](https://www.cockroachlabs.com/docs/v1.0/delete), and [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements now accepts placeholders. [#5934](https://github.com/cockroachdb/cockroach/pull/5934)
* When sending queries to the `cockroach sql` tool via `stdin`, a terminating semicolon is not required and instructions for interactive mode are no longer printed (similar to the existing behavior of `-e`). [#5988](https://github.com/cockroachdb/cockroach/pull/5988)
* The `experimental_uuid_v4()` [SQL function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) has been renamed to `uuid_v4()`. The old name remains as a deprecated alias. [#5886](https://github.com/cockroachdb/cockroach/pull/5886)
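
A short sketch of the new `ALTER TABLE` column operations and the `STRING(n)` length parameter listed above; the `accounts` table and its columns are hypothetical.

```sql
-- Hypothetical starting table using the new STRING(n) length parameter.
CREATE TABLE accounts (
    id INT PRIMARY KEY,
    name STRING(50) NOT NULL
);

-- Add a column with a default value.
ALTER TABLE accounts ADD COLUMN status STRING(20) DEFAULT 'active';

-- Change or remove a column's default.
ALTER TABLE accounts ALTER COLUMN status SET DEFAULT 'new';
ALTER TABLE accounts ALTER COLUMN status DROP DEFAULT;

-- Drop a NOT NULL constraint.
ALTER TABLE accounts ALTER COLUMN name DROP NOT NULL;
```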

### Performance Improvements

* Reduced the number of files created in the data directory, and fixed issues in which the process could run out of file descriptors. [#5888](https://github.com/cockroachdb/cockroach/pull/5888)
* Introduced a special fast path for transactions that are contained entirely within one range. [#5966](https://github.com/cockroachdb/cockroach/pull/5966)
* Reduced the number of unnecessary network round trips by ensuring that the range lease holder and the raft leader coincide. [#5973](https://github.com/cockroachdb/cockroach/pull/5973)

### Bug Fixes

* The gossip system no longer recommends redundant forwarding addresses, which could prevent the cluster from fully connecting. [#5901](https://github.com/cockroachdb/cockroach/pull/5901)
* Timestamps are now correctly reported in the network protocol as having time zone offsets. [#5909](https://github.com/cockroachdb/cockroach/pull/5909)
* Buffered data is now flushed to the network immediately on error. [#5909](https://github.com/cockroachdb/cockroach/pull/5909)
* Timeseries data no longer diverges across replicas. [#5905](https://github.com/cockroachdb/cockroach/pull/5905)

### Internal Changes

* [SQL functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) related to the current time (`now`, `current_timestamp`, `statement_timestamp`) now use the clock of the node that executed them instead of a timestamp derived from the internal hybrid logical clock. This means that these functions are less likely to produce duplicate values, but it is more likely for one transaction to see a timestamp that is less than a timestamp already seen in another transaction. Applications that require a globally monotonic timestamp-like value can use the new function `cluster_logical_timestamp` instead. [#5805](https://github.com/cockroachdb/cockroach/pull/5805)
* New HTTP endpoint `/debug/metrics` exposes internal metric data in JSON format. [#5894](https://github.com/cockroachdb/cockroach/pull/5894)
* [Logs](https://www.cockroachlabs.com/docs/v1.0/debug-and-error-logs) are no longer written to `stderr` by default, only to the `logs` subdirectory of the data directory. [#5979](https://github.com/cockroachdb/cockroach/pull/5979)
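
A small sketch contrasting the node-local time functions with the `cluster_logical_timestamp` function described in the first bullet above.

```sql
-- Node-local wall-clock values, per the note above; not globally monotonic.
SELECT now(), statement_timestamp();

-- HLC-derived value for applications that need a globally monotonic,
-- timestamp-like result.
SELECT cluster_logical_timestamp();
```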

### Contributors

This release includes 58 merged PRs by 18 authors. We would like to thank the following contributors from the CockroachDB community, especially [first-time contributor Andrey Shinkevich](https://github.com/cockroachdb/cockroach/pull/5956).

* Andrey Shinkevich
* Kenji Kaneda
* Seif Lotfy
* es-chow

diff --git a/src/current/_includes/releases/v1.0/beta-20160421.md b/src/current/_includes/releases/v1.0/beta-20160421.md
deleted file mode 100644
index a0059cc3edf..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20160421.md
+++ /dev/null
@@ -1,97 +0,0 @@

## {{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

### Upgrade Procedure

* This release cannot be run concurrently with older beta releases. Please stop all nodes running older releases before restarting any node with this version.

### New Features

* [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements may now set columns which are part of the primary key. [#6043](https://github.com/cockroachdb/cockroach/pull/6043)
* `CHECK` constraints can now be defined when creating tables and will be enforced for [`INSERT`](https://www.cockroachlabs.com/docs/v1.0/insert) statements (but not yet for [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements). [#6044](https://github.com/cockroachdb/cockroach/pull/6044)
* The `pow()` and `exp()` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) on `DECIMAL` values no longer lose precision. [#6170](https://github.com/cockroachdb/cockroach/pull/6170)
* In the [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell, the special command `!` can now be used to execute a system command and display its output. `|` executes a system command and runs its output as a SQL statement. [#5961](https://github.com/cockroachdb/cockroach/pull/5961)
* Added `SHOW CREATE TABLE`. [#6158](https://github.com/cockroachdb/cockroach/pull/6158)
* The `RETURNING` clause of [`INSERT`](https://www.cockroachlabs.com/docs/v1.0/insert) statements can now refer to tables by aliases defined with `AS`. [#5903](https://github.com/cockroachdb/cockroach/pull/5903)
* The SQL parser now accepts the `CASCADE` and `RESTRICT` keywords in the [`DROP INDEX`](https://www.cockroachlabs.com/docs/v1.0/drop-index), [`TRUNCATE TABLE`](https://www.cockroachlabs.com/docs/v1.0/truncate), [`ALTER TABLE DROP COLUMN`](https://www.cockroachlabs.com/docs/v1.0/alter-table), and [`ALTER TABLE DROP CONSTRAINT`](https://www.cockroachlabs.com/docs/v1.0/alter-table) statements. These keywords relate to features we have not yet implemented, so they do nothing, but this change improves compatibility with frameworks that like to send these keywords. [#5957](https://github.com/cockroachdb/cockroach/pull/5957)
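
A brief sketch of the `CHECK` constraint and `SHOW CREATE TABLE` additions listed above; the `products` table and the `price_positive` constraint are hypothetical.

```sql
-- Hypothetical table with a CHECK constraint defined at creation time.
CREATE TABLE products (
    id INT PRIMARY KEY,
    price DECIMAL,
    CONSTRAINT price_positive CHECK (price > 0)
);

-- Enforced on INSERT as of this release (not yet on UPDATE).
INSERT INTO products (id, price) VALUES (1, 9.99);   -- accepted
INSERT INTO products (id, price) VALUES (2, -1.00);  -- rejected by the CHECK

-- Inspect the resulting table definition.
SHOW CREATE TABLE products;
```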

### Admin UI

* The time scale used for graphs can now be changed. [#6145](https://github.com/cockroachdb/cockroach/pull/6145)
* Some UI elements now include tooltips with additional explanations. [#6006](https://github.com/cockroachdb/cockroach/pull/6006)
* A warning is displayed when the cluster is unreachable. [#6042](https://github.com/cockroachdb/cockroach/pull/6042)
* There are now fewer unnecessary scrollbars. [#6018](https://github.com/cockroachdb/cockroach/pull/6018)

### Bug fixes

* The rebalancer is now better at distributing ranges across the nodes in a cluster. [#6133](https://github.com/cockroachdb/cockroach/pull/6133)
* Fixed a major cause of "timestamp in future" errors. [#5845](https://github.com/cockroachdb/cockroach/pull/5845)
* Fixed several potential panics. [#6111](https://github.com/cockroachdb/cockroach/pull/6111) [#6143](https://github.com/cockroachdb/cockroach/pull/6143)

### Performance Improvements

* Improved the performance of schema changes while data is being modified. [#5996](https://github.com/cockroachdb/cockroach/pull/5996)
* The RPC subsystem is now bypassed for requests to the local node. [#6021](https://github.com/cockroachdb/cockroach/pull/6021)
* Information about completed transactions is now cleaned up more efficiently. [#5882](https://github.com/cockroachdb/cockroach/pull/5882)
* Fixed a leak of goroutines. [#6010](https://github.com/cockroachdb/cockroach/pull/6010)
* Reduced memory allocations on the critical path. [#6117](https://github.com/cockroachdb/cockroach/pull/6117) [#6119](https://github.com/cockroachdb/cockroach/pull/6119) [#6140](https://github.com/cockroachdb/cockroach/pull/6140) [#6141](https://github.com/cockroachdb/cockroach/pull/6141) [#6142](https://github.com/cockroachdb/cockroach/pull/6142)

### Doc Improvements

* Overview of [SQL privileges](https://www.cockroachlabs.com/docs/v1.0/privileges).
* Summaries and required privileges for all supported [SQL statements](https://www.cockroachlabs.com/docs/v1.0/sql-statements).
* Reference docs for [`DROP DATABASE`](https://www.cockroachlabs.com/docs/v1.0/drop-database), [`DROP TABLE`](https://www.cockroachlabs.com/docs/v1.0/drop-table), [`GRANT`](https://www.cockroachlabs.com/docs/v1.0/grant), [`RENAME DATABASE`](https://www.cockroachlabs.com/docs/v1.0/rename-database), [`RENAME TABLE`](https://www.cockroachlabs.com/docs/v1.0/rename-table), [`REVOKE`](https://www.cockroachlabs.com/docs/v1.0/revoke), and [`SHOW INDEX`](https://www.cockroachlabs.com/docs/v1.0/show-index).

### Contributors

This release includes 74 merged PRs by 21 authors. We would like to thank the following contributors from the CockroachDB community, especially first-time contributor [Andrew NS Yeow](https://github.com/cockroachdb/cockroach/pull/6109).

* Andrew NS Yeow
* Kenji Kaneda
* Kenjiro Nakayama
* Lu Guanqun
* Seif Lotfy

diff --git a/src/current/_includes/releases/v1.0/beta-20160428.md b/src/current/_includes/releases/v1.0/beta-20160428.md
deleted file mode 100644
index be6e04630cf..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20160428.md
+++ /dev/null
@@ -1,64 +0,0 @@

## {{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

### Backwards-Incompatible Changes

* Time zone offsets are no longer shown when querying columns of type `TIMESTAMP`; the new type `TIMESTAMP WITH TIME ZONE` should be used instead. This is more consistent with other databases. [#6165](https://github.com/cockroachdb/cockroach/pull/6165)
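
A minimal sketch of the distinction described above between `TIMESTAMP` and `TIMESTAMP WITH TIME ZONE`; the `events` table and its columns are hypothetical.

```sql
-- Hypothetical table with both timestamp flavors.
CREATE TABLE events (
    id INT PRIMARY KEY,
    logged_at TIMESTAMP,                  -- rendered without a time zone offset
    logged_at_tz TIMESTAMP WITH TIME ZONE -- keeps the offset when queried
);

INSERT INTO events (id, logged_at, logged_at_tz)
    VALUES (1, '2016-04-28 12:00:00', '2016-04-28 12:00:00+00:00');

SELECT logged_at, logged_at_tz FROM events;
```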

### New Features

* `INTERVAL` values accept two new formats: ISO8601 (`P2H30M`) and the PostgreSQL format (`2 hours 30 minutes`) in addition to the existing Go style (`2h30m`). [#6216](https://github.com/cockroachdb/cockroach/pull/6216)
* Tuples can now be compared in SQL expressions. [#6217](https://github.com/cockroachdb/cockroach/pull/6217)
* The server now implements the `systemd` `NOTIFY_SOCKET` protocol. [#6268](https://github.com/cockroachdb/cockroach/pull/6268)
* The new flag `cockroach start --background` can be used to start a server in the background. This is better than appending `&` because the process doesn't return until it is ready to receive traffic. [#6268](https://github.com/cockroachdb/cockroach/pull/6268)
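
A short sketch of the interval formats and tuple comparisons mentioned above; the literals are illustrative only.

```sql
-- The same duration written in the Go style and the PostgreSQL format.
SELECT INTERVAL '2h30m', INTERVAL '2 hours 30 minutes';

-- Tuples can now be compared element by element in expressions.
SELECT (1, 2) < (1, 3), (3, 4) = (3, 4);
```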

### Bug fixes

* The SQL shell can once again be suspended with `ctrl-z`. [#6171](https://github.com/cockroachdb/cockroach/pull/6171)
* Fixed an error in the SQL shell when the history file contains empty lines. [#6192](https://github.com/cockroachdb/cockroach/pull/6192)
* The `--store` and `--key-size` command-line flags are no longer marked as required. [#6229](https://github.com/cockroachdb/cockroach/pull/6229)
* The command line interface is better about printing errors to `stderr`, not just the log file. [#6258](https://github.com/cockroachdb/cockroach/pull/6258)
* The `INFO` log file is now created in the correct default location (`cockroach-data/logs`) instead of `$TMPDIR`. [#6265](https://github.com/cockroachdb/cockroach/pull/6265)
* Fixed errors that could be introduced by updating tables while a schema change is in progress. [#6160](https://github.com/cockroachdb/cockroach/pull/6160)
* Fixed several potential panics. [#6187](https://github.com/cockroachdb/cockroach/pull/6187), [#6235](https://github.com/cockroachdb/cockroach/pull/6235), [#6242](https://github.com/cockroachdb/cockroach/pull/6242)

### Performance Improvements

* Raft processing is no longer blocked while generating a snapshot for a new replica. [#6253](https://github.com/cockroachdb/cockroach/pull/6253)

### Contributors

This release includes 89 merged PRs by 19 authors. We would like to thank the following contributors from the CockroachDB community, especially first-time contributor [Karl Southern](https://github.com/cockroachdb/cockroach/pull/4858).

* Karl Southern
* Kenjiro Nakayama

diff --git a/src/current/_includes/releases/v1.0/beta-20160505.md b/src/current/_includes/releases/v1.0/beta-20160505.md
deleted file mode 100644
index 0b5a90b3485..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20160505.md
+++ /dev/null
@@ -1,76 +0,0 @@

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

New Features

- -* New SQL command `UPSERT` is available. This is syntactically similar - to `INSERT` but will update the row with the supplied columns if - there is a conflicting row for the primary key. - [#6456](https://github.com/cockroachdb/cockroach/pull/6456) -* The SQL type system has been overhauled, reducing the number of - explicit casts required. - [RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160203_typing.md) -* Tuples are now considered for index selection, so `SELECT * FROM t - WHERE (a, b) > ($1, $2)` can use an index defined on `(a, b)`. - [#6332](https://github.com/cockroachdb/cockroach/pull/6332) - -
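A minimal sketch of the new `UPSERT` statement, using a hypothetical `accounts` table:

```sql
-- UPSERT inserts a new row, or updates the supplied columns if a row
-- with the same primary key already exists.
CREATE TABLE accounts (id INT PRIMARY KEY, balance DECIMAL);

UPSERT INTO accounts (id, balance) VALUES (1, 100.00);  -- inserts id 1
UPSERT INTO accounts (id, balance) VALUES (1, 250.00);  -- id 1 exists, so its balance is updated
```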

Performance improvements

- -* Bulk inserts to previously-empty table spans are now faster. - [#6375](https://github.com/cockroachdb/cockroach/pull/6375), - [#6414](https://github.com/cockroachdb/cockroach/pull/6414) -* Ranges that have grown above the target size are now split before - being rebalanced, reducing memory usage. - [#6447](https://github.com/cockroachdb/cockroach/pull/6447) -* Reduced rapid memory growth during partitions. - [#6448](https://github.com/cockroachdb/cockroach/pull/6448) -* [`ALTER TABLE`](https://www.cockroachlabs.com/docs/v1.0/alter-table) now performs backfill operations in chunks. - [#6056](https://github.com/cockroachdb/cockroach/pull/6056) -* [`DROP TABLE`](https://www.cockroachlabs.com/docs/v1.0/drop-table) now uses the schema change mechanism instead of - synchronously deleting all data. - [#6336](https://github.com/cockroachdb/cockroach/pull/6336) -* Scanning over records that have only one MVCC version is faster. - [#6351](https://github.com/cockroachdb/cockroach/pull/6351) -* The command queue now uses coarse-grained spans until a conflict has - occurred. - [#6412](https://github.com/cockroachdb/cockroach/pull/6412) -* When a replica is rebalanced from one node to another, the old node - no longer triggers unnecessary Raft elections. - [#6423](https://github.com/cockroachdb/cockroach/pull/6423) - -

Production

- -* Servers now attempt to drain more gracefully when restarted. - [#6313](https://github.com/cockroachdb/cockroach/pull/6313) -* Metrics on lease requests are now available internally. - [#5596](https://github.com/cockroachdb/cockroach/pull/5596) -* The RocksDB cache will no longer use all available memory if - `cgroups` reports a limit higher than physical memory. - [#6379](https://github.com/cockroachdb/cockroach/pull/6379) - -

Bug Fixes

- -* The range descriptor cache is now invalidated correctly, fixing a - bug in which requests could be retried repeatedly on the wrong node. - [#6425](https://github.com/cockroachdb/cockroach/pull/6425) -* Fixed a bug with some queries using aggregate functions like `MIN()` - and `MAX()`. [#6380](https://github.com/cockroachdb/cockroach/pull/6380) -* Tuple comparisons now work correctly when one tuple contains `NULL`. - [#6370](https://github.com/cockroachdb/cockroach/pull/6370) - -

Doc Updates

- -* Community-supported docs and configuration files for [running CockroachDB inside a single VirtualBox virtual machine](http://uptimedba.github.io/cockroach-vb-single/cockroach-vb-single/home.html). [#263](https://github.com/cockroachdb/docs/pull/263) -* Docs on using [environment variables](https://www.cockroachlabs.com/docs/v1.0/cockroach-commands#environment-variables) as default values for command-line flags. [#235](https://github.com/cockroachdb/docs/pull/235) -* Docs on using the [`version`](https://www.cockroachlabs.com/docs/v1.0/view-version-details) and [`node`](https://www.cockroachlabs.com/docs/v1.0/view-node-details) commands. - -

Contributors

- -This release includes 66 merged PRs by 18 authors. We would like to -thank the following contributors from the CockroachDB community, especially first-time contributor [Paul Steffensen](https://github.com/uptimeDBA). - -* Bogdan Batog -* il9ue -* Kenji Kaneda -* Paul Steffensen diff --git a/src/current/_includes/releases/v1.0/beta-20160512.md b/src/current/_includes/releases/v1.0/beta-20160512.md deleted file mode 100644 index 4b5bc60258b..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160512.md +++ /dev/null @@ -1,47 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Upgrade Procedure

- -- This release cannot be run concurrently with older beta releases. Please stop all nodes running older releases before restarting any node with this version. - -

New Features

- -- The [`INSERT`](https://www.cockroachlabs.com/docs/v1.0/insert) statement now accepts an `ON CONFLICT` clause, for a more flexible alternative to `UPSERT`. [#6591](https://github.com/cockroachdb/cockroach/pull/6591) -- The new [`EXPLAIN (TYPES)`](https://www.cockroachlabs.com/docs/v1.0/explain) subcommand prints information about the types of expressions in a statement. [#6482](https://github.com/cockroachdb/cockroach/pull/6482) -- Added the aggregate functions [`BOOL_AND()`](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) and [`BOOL_OR()`](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators). [#6535](https://github.com/cockroachdb/cockroach/pull/6535) -- The aggregate functions [`SUM(INT)`](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) and [`AVG(INT)`](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) now return `DECIMAL` instead of `INT` and `FLOAT` respectively. [#6532](https://github.com/cockroachdb/cockroach/pull/6532) -- The new command-line flag [`--raft-tick-interval`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) allows the raft heartbeat frequency to be changed in high-latency deployments. [#6615](https://github.com/cockroachdb/cockroach/pull/6615) -- [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp) values can now be cast to type `STRING`. [#6605](https://github.com/cockroachdb/cockroach/pull/6605) -- The [`CREATE DATABASE`](https://www.cockroachlabs.com/docs/v1.0/create-database) statement now accepts an `ENCODING` option for compatibility with PostgreSQL, although `UTF8` is the only supported encoding. [#6614](https://github.com/cockroachdb/cockroach/pull/6614) - -
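A minimal sketch of the new `ON CONFLICT` clause and the `EXPLAIN (TYPES)` subcommand, using a hypothetical `kv` table:

```sql
-- ON CONFLICT gives finer control than UPSERT over what happens when a
-- new row collides with an existing primary key.
CREATE TABLE kv (k INT PRIMARY KEY, v INT);

INSERT INTO kv (k, v) VALUES (1, 10)
    ON CONFLICT (k) DO UPDATE SET v = excluded.v;

-- EXPLAIN (TYPES) prints the inferred type of each expression.
EXPLAIN (TYPES) SELECT k + 1 FROM kv;
```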

Bug Fixes

- -- Fixed a bug when a single [`INSERT`](https://www.cockroachlabs.com/docs/v1.0/insert) statement inserts multiple values for the same primary key. [#6564](https://github.com/cockroachdb/cockroach/pull/6564) -- Fixed an inconsistency that could occur when a transaction in [`SNAPSHOT` isolation](https://www.cockroachlabs.com/docs/v1.0/transactions#isolation-levels) used the internal `DeleteRange` operation. [#6548](https://github.com/cockroachdb/cockroach/pull/6548) -- Fixed an integer underflow that could result in log messages like `transport: http2Server received 4294965918-bytes`. [#6567](https://github.com/cockroachdb/cockroach/pull/6567) -- Long-running transactions are no longer allowed to exceed their table descriptor lease. [#6418](https://github.com/cockroachdb/cockroach/pull/6418) -- Subtracting an `INTERVAL` from a `TIMESTAMP WITH TIME ZONE` returns a `TIMESTAMP WITH TIME ZONE` instead of one without a time zone. [#6540](https://github.com/cockroachdb/cockroach/pull/6540) -- Improved type checking of comparisons involving tuples. [#6517](https://github.com/cockroachdb/cockroach/pull/6517) -- [`CREATE TABLE`](https://www.cockroachlabs.com/docs/v1.0/create-table), [`ALTER TABLE`](https://www.cockroachlabs.com/docs/v1.0/alter-table), and [`DROP INDEX`](https://www.cockroachlabs.com/docs/v1.0/drop-index) now do more validation of their arguments. [#6492](https://github.com/cockroachdb/cockroach/pull/6492) - -

Performance Improvements

- -- Improved tracking of keys modified during a transaction, improving performance of bulk inserts. [#6611](https://github.com/cockroachdb/cockroach/pull/6611) -- Schema changes can now continue past transient errors. [#6552](https://github.com/cockroachdb/cockroach/pull/6552) - -

Doc Updates

- -- The new [Tech Talks](https://www.cockroachlabs.com/community/tech-talks/) page links to recordings and slides from talks by CockroachDB founders and engineers. -- Docs for the [built-in SQL client](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) now demonstrate various ways to execute SQL statements from the command line and run external commands from the interactive shell. -- [Data type](https://www.cockroachlabs.com/docs/v1.0/data-types) docs now provide details about storage size. -- Although it's not possible to access CockroachDB's key-value store directly, the [FAQ](https://www.cockroachlabs.com/docs/v1.0/frequently-asked-questions#can-i-use-cockroachdb-as-a-key-value-store) now suggests a SQL alternative. -- [Replication recommendations](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones#node-replica-recommendations) now address cross-datacenter scenarios. - -

Contributors

- -This release includes 87 merged PRs by 18 authors. We would like to -thank the following contributor from the CockroachDB community: - -* Kenji Kaneda diff --git a/src/current/_includes/releases/v1.0/beta-20160519.md b/src/current/_includes/releases/v1.0/beta-20160519.md deleted file mode 100644 index 49bfbf3b121..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160519.md +++ /dev/null @@ -1,45 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Backwards-Incompatible Changes

- -- [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp) values are now truncated to microsecond resolution when sent over the network for compatibility with the PostgreSQL protocol. The `format_timestamp_ns(ts)` or `extract(nanosecond from ts)` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) can be used to access the full nanosecond precision of a timestamp. The `now()`, `current_timestamp()`, and `statement_timestamp()` functions are truncated to microsecond resolution to avoid confusion when timestamp values are used in unique indexes; the new `current_timestamp_ns()` function can be used to get a non-truncated timestamp. [#6604](https://github.com/cockroachdb/cockroach/pull/6604) - -

New Features

- -- `DO NOTHING` is now supported in [`INSERT ... ON CONFLICT`](https://www.cockroachlabs.com/docs/v1.0/insert) statements. [#6633](https://github.com/cockroachdb/cockroach/pull/6633) -- [`CHECK`](https://www.cockroachlabs.com/docs/v1.0/check) constraints can now be specified at the table level (see the sketch after this list). [#6625](https://github.com/cockroachdb/cockroach/pull/6625) -- Binary encoding is now supported in the network protocol for `BOOL`, `FLOAT`, `DECIMAL`, and `STRING` types, improving compatibility with some PostgreSQL drivers. [#6661](https://github.com/cockroachdb/cockroach/pull/6661) -- The new `cockroach freeze-cluster` [command](https://www.cockroachlabs.com/docs/v1.0/cockroach-commands) has been added to the command-line interface; it will be used in the upgrade process for a future beta release. [#6675](https://github.com/cockroachdb/cockroach/pull/6675) - -
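A minimal sketch of a table-level `CHECK` constraint together with `ON CONFLICT DO NOTHING`, using a hypothetical `products` table:

```sql
-- A table-level CHECK constraint may reference more than one column.
CREATE TABLE products (
    id       INT PRIMARY KEY,
    price    DECIMAL,
    discount DECIMAL,
    CHECK (discount <= price)
);

-- DO NOTHING silently skips rows whose primary key already exists.
INSERT INTO products (id, price, discount) VALUES (1, 10.0, 1.0)
    ON CONFLICT (id) DO NOTHING;
```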

Bug Fixes

- -- [`EXPLAIN DELETE`](https://www.cockroachlabs.com/docs/v1.0/explain) no longer executes the `DELETE` statement. [#6622](https://github.com/cockroachdb/cockroach/pull/6622) -- [`CHECK`](https://www.cockroachlabs.com/docs/v1.0/check) constraints are now enforced during [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements. [#6753](https://github.com/cockroachdb/cockroach/pull/6753) -- [`CHECK`](https://www.cockroachlabs.com/docs/v1.0/check) constraints now work correctly when columns have been dropped. [#6730](https://github.com/cockroachdb/cockroach/pull/6730) -- Tuples and the `DEFAULT` keyword now work in [`INSERT ... ON CONFLICT DO UPDATE`](https://www.cockroachlabs.com/docs/v1.0/insert) statements. [#6636](https://github.com/cockroachdb/cockroach/pull/6636) -- Errors are now reported with standard PostgreSQL error codes in more places. [#6652](https://github.com/cockroachdb/cockroach/pull/6652), [#6554](https://github.com/cockroachdb/cockroach/pull/6554) -- Fixed a panic while a node is attempting a clean shutdown. [#6677](https://github.com/cockroachdb/cockroach/pull/6677) -- Internal retry logic has been improved so requests are no longer stuck retrying endlessly on the wrong replica. [#6688](https://github.com/cockroachdb/cockroach/pull/6688) - -

Performance Improvements

- -- Snapshot generation is now throttled to reduce peak memory consumption. - [#6632](https://github.com/cockroachdb/cockroach/pull/6632) -- Improved performance for transactions with low contention. [#6413](https://github.com/cockroachdb/cockroach/pull/6413) -- Improved performance for `UPSERT` statements (and the equivalent [`INSERT ... ON CONFLICT DO UPDATE`](https://www.cockroachlabs.com/docs/v1.0/insert) statements) when the table has no secondary indexes and values are supplied for all columns in the table. [#6673](https://github.com/cockroachdb/cockroach/pull/6673) - -

Doc Updates

- -- [SQL Grammar](https://www.cockroachlabs.com/docs/v1.0/sql-grammar) diagrams have been streamlined to reduce unnecessary duplication. -- Docs on SQL [Constraints](https://www.cockroachlabs.com/docs/v1.0/constraints) are now available. [#280](https://github.com/cockroachdb/docs/pull/280) -- Docs on the [`CREATE TABLE`](https://www.cockroachlabs.com/docs/v1.0/create-table) statement are now available. [#296](https://github.com/cockroachdb/docs/pull/296) - -

Contributors

- -This release includes 65 merged PRs by 18 authors. We would like to -thank the following contributors from the CockroachDB community: - -* Kenji Kaneda -* Paul Steffensen diff --git a/src/current/_includes/releases/v1.0/beta-20160526.md b/src/current/_includes/releases/v1.0/beta-20160526.md deleted file mode 100644 index 50c3a9d2665..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160526.md +++ /dev/null @@ -1,37 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Backwards-Incompatible Changes

- -- Numeric literals containing a decimal point are now treated as type [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) instead of type [`FLOAT`](https://www.cockroachlabs.com/docs/v1.0/float), unless type inference determines that `FLOAT` should be used. In some cases, an explicit `CAST(x AS FLOAT)` may be needed. [#6752](https://github.com/cockroachdb/cockroach/pull/6752) -- The custom error codes `CR000` (indicating a transaction needs to be [retried](https://www.cockroachlabs.com/docs/v1.0/transactions#transaction-retries)) and `CR001` (indicating a transaction is in an invalid state) have been replaced with the PostgreSQL standard error codes `40001` (serialization failure) and `25000` (invalid transaction state), respectively. [#6797](https://github.com/cockroachdb/cockroach/pull/6797) - -
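A short sketch of the new literal typing rule and the explicit cast that may now be needed:

```sql
SELECT 1.5;                 -- now typed as DECIMAL rather than FLOAT
SELECT CAST(1.5 AS FLOAT);  -- explicit cast where FLOAT is required
SELECT 1.5::FLOAT;          -- equivalent shorthand cast
```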

Bug Fixes

- -- Fixed problems when tables are renamed or dropped and recreated in rapid succession. [#6595](https://github.com/cockroachdb/cockroach/pull/6595) -- [`DROP DATABASE`](https://www.cockroachlabs.com/docs/v1.0/drop-database) now works correctly with quoted names. [#6851](https://github.com/cockroachdb/cockroach/pull/6851) -- Gracefully shutting down a node now completes faster. [#6777](https://github.com/cockroachdb/cockroach/pull/6777) -- [`INSERT ... ON CONFLICT DO NOTHING`](https://www.cockroachlabs.com/docs/v1.0/insert) no longer crashes the server when no conflicting index is given. [#6795](https://github.com/cockroachdb/cockroach/pull/6795) -- [`INSERT ... ON CONFLICT DO NOTHING`](https://www.cockroachlabs.com/docs/v1.0/insert) now requires only the `INSERT` permission instead of both `INSERT` and `UPDATE`. [#6827](https://github.com/cockroachdb/cockroach/pull/6827) -- Numeric literals in scientific notation are now case-insensitive. [#6864](https://github.com/cockroachdb/cockroach/pull/6864) -- `TIMESTAMP WITHOUT TIME ZONE` is now recognized as a synonym for [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp). [#6888](https://github.com/cockroachdb/cockroach/pull/6888) -- Attempting to access a database that does not exist now returns PostgreSQL error code `3D000` ("invalid catalog name") instead of a generic error. [#6680](https://github.com/cockroachdb/cockroach/pull/6680) - -

Internal Changes

- -- Most of the `/_status/` HTTP endpoints now use GRPC internally. [#6702](https://github.com/cockroachdb/cockroach/pull/6702) [#6788](https://github.com/cockroachdb/cockroach/pull/6788) -- The `cockroach exterminate` command (which did not work) has been removed. [#6780](https://github.com/cockroachdb/cockroach/pull/6780) -- Garbage collection now retains the first value outside the configured interval, since it was the current value at the start of the interval. [#6778](https://github.com/cockroachdb/cockroach/pull/6778) - -

Doc Updates

- -- Docs on the [`INSERT`](https://www.cockroachlabs.com/docs/v1.0/insert) statement are now available. [#308](https://github.com/cockroachdb/docs/pull/308) -- Docs on the [`UPSERT`](https://www.cockroachlabs.com/docs/v1.0/upsert) statement are now available. [#308](https://github.com/cockroachdb/docs/pull/308) - -

Contributors

- -This release includes 58 merged PRs by 16 authors. We would like to -thank the following contributor from the CockroachDB community: - -* Kenji Kaneda diff --git a/src/current/_includes/releases/v1.0/beta-20160602.md b/src/current/_includes/releases/v1.0/beta-20160602.md deleted file mode 100644 index 9bbc4c58a24..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160602.md +++ /dev/null @@ -1,34 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

New Features

- -- String literals can now be parsed as [`DATE`](https://www.cockroachlabs.com/docs/v1.0/date), [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp), [`TIMESTAMPTZ`](https://www.cockroachlabs.com/docs/v1.0/timestamp), or [`INTERVAL`](https://www.cockroachlabs.com/docs/v1.0/interval) without an explicit cast. [#6925](https://github.com/cockroachdb/cockroach/pull/6925) -- Floor division is now supported with a new [operator](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#operators), `//`. [#6642](https://github.com/cockroachdb/cockroach/pull/6642) -- Sub-queries are now allowed in `LIMIT`, `OFFSET`, and `RETURNING` expressions. [#6735](https://github.com/cockroachdb/cockroach/pull/6735) - -
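A minimal sketch of implicit string-to-timestamp parsing and the new floor-division operator, using a hypothetical `events` table:

```sql
-- A string literal is parsed as a TIMESTAMP without an explicit cast.
CREATE TABLE events (id INT PRIMARY KEY, starts_at TIMESTAMP);
INSERT INTO events VALUES (1, '2016-06-02 10:30:00');

-- Floor division with the new // operator.
SELECT 7 // 2;  -- 3
```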

Bug Fixes

- -- Fixed a missing error check that could result in inconsistencies when transactions conflict. [#6899](https://github.com/cockroachdb/cockroach/pull/6899) - -

Performance Improvements

- -- Improved performance of one-phase transactions. [#6857](https://github.com/cockroachdb/cockroach/pull/6857), [#6861](https://github.com/cockroachdb/cockroach/pull/6861) -- Improved the ability of `MIN()` and `MAX()` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) to detect the ordering of the data and read only a single row. [#6891](https://github.com/cockroachdb/cockroach/pull/6891) - -

Doc Updates

- -- Added a tutorial on [building an Application with CockroachDB and SQLAlchemy](https://www.cockroachlabs.com/blog/building-application-cockroachdb-sqlalchemy-2/) -- Added docs on [how CockroachDB handles `NULL` values](https://www.cockroachlabs.com/docs/v1.0/null-handling) in various contexts. [#333](https://github.com/cockroachdb/docs/pull/333) -- Improved guidance on [Contributing to CockroachDB docs](https://github.com/cockroachdb/docs/blob/master/CONTRIBUTING.md). [#344](https://github.com/cockroachdb/docs/pull/344) -- Improved [zone configuration examples](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones#basic-examples). [#327](https://github.com/cockroachdb/docs/pull/327) - -

Contributors

- -This release includes 55 merged PRs by 18 authors. We would like to -thank the following contributors from the CockroachDB community, especially first-time contributors Sean Loiselle and Thanakom Sangnetra: - -- Sean Loiselle -- Thanakom Sangnetra -- Paul Steffensen diff --git a/src/current/_includes/releases/v1.0/beta-20160609.md b/src/current/_includes/releases/v1.0/beta-20160609.md deleted file mode 100644 index c4d563c5b78..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160609.md +++ /dev/null @@ -1,38 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

New Features

- -- The [`SERIAL`](https://www.cockroachlabs.com/docs/v1.0/serial) column type is now supported as an alias for `INT DEFAULT unique_rowid()`. In CockroachDB, this type defaults to a unique 64-bit signed integer that is the combination of the insert timestamp and the ID of the node executing the insert. It therefore differs from similar types in PostgreSQL and MySQL, which auto-increment integers in an approximate sequence. [#7032](https://github.com/cockroachdb/cockroach/pull/7032) -- For client-side [transaction retries](https://www.cockroachlabs.com/docs/v1.0/transactions#transaction-retries), reissuing the original `SAVEPOINT cockroach_restart` now has the same effect as `ROLLBACK TO SAVEPOINT cockroach_restart`. This improves compatibility with some client drivers. [#6955](https://github.com/cockroachdb/cockroach/pull/6955) -- `FAMILY` is now a reserved word in the SQL dialect in preparation for a future feature. [#7069](https://github.com/cockroachdb/cockroach/pull/7069) - -
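A minimal sketch of `SERIAL` and of the savepoint-based retry pattern described above, using a hypothetical `users` table:

```sql
-- SERIAL is an alias for INT DEFAULT unique_rowid(): each value is a unique
-- 64-bit integer, not a gapless auto-increment sequence.
CREATE TABLE users (
    id   SERIAL PRIMARY KEY,
    name STRING
);

INSERT INTO users (name) VALUES ('carl') RETURNING id;

-- Client-side retry skeleton: on a retryable error, the client may now
-- reissue SAVEPOINT cockroach_restart instead of ROLLBACK TO SAVEPOINT.
BEGIN;
SAVEPOINT cockroach_restart;
-- ... application statements ...
RELEASE SAVEPOINT cockroach_restart;
COMMIT;
```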

Bug Fixes

- -- The [command-line SQL shell](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) is now able to print non-ASCII characters. [#7045](https://github.com/cockroachdb/cockroach/pull/7045), [#7048](https://github.com/cockroachdb/cockroach/pull/7048) -- Commands issued with a timestamp outside the garbage collection window will now always fail. [#6992](https://github.com/cockroachdb/cockroach/pull/6992) -- All aggregate functions now return `NULL` when run on a table with no rows. [#7043](https://github.com/cockroachdb/cockroach/pull/7043) - -

Performance Improvements

- -- jemalloc is now used as the memory allocator for the C++ portions of the server. The build tag `stdmalloc` can be used to switch back to the standard `malloc`. [#7006](https://github.com/cockroachdb/cockroach/pull/7006) -- Raft logs are now garbage collected more aggressively. This should generally make snapshots smaller, reducing memory pressure, but sometimes it may cause more snapshots to be sent. [#7040](https://github.com/cockroachdb/cockroach/pull/7040) -- Reduced allocations in [`INSERT`](https://www.cockroachlabs.com/docs/v1.0/insert) and [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements involving secondary indexes. [#7011](https://github.com/cockroachdb/cockroach/pull/7011) -- Improved performance of conditional puts with no existing value. [#7016](https://github.com/cockroachdb/cockroach/pull/7016) -- Improved performance of small transactions. [#7015](https://github.com/cockroachdb/cockroach/pull/7015) - -

Doc Updates

- -- The [Build an App](https://www.cockroachlabs.com/docs/v1.0/build-a-rust-app-with-cockroachdb) tutorial now demonstrates connecting to CockroachDB from the [Rust Postgres driver](https://www.cockroachlabs.com/docs/v1.0/install-client-drivers). -- Docs on the [`CREATE TABLE`](https://www.cockroachlabs.com/docs/v1.0/create-table) statement now offer both basic and expanded grammar diagrams. [#347](https://github.com/cockroachdb/docs/pull/347) -- Docs on [constraints](https://www.cockroachlabs.com/docs/v1.0/constraints) now include diagrams of all table-level and row-level constraints. [#361](https://github.com/cockroachdb/docs/pull/361) - -

Contributors

- -This release includes 56 merged PRs by 17 authors. We would like to -thank the following contributors from the CockroachDB community, especially first-time contributor Alex Robinson: - -- Alex Robinson -- Kenji Kaneda -- Paul Steffensen diff --git a/src/current/_includes/releases/v1.0/beta-20160616.md b/src/current/_includes/releases/v1.0/beta-20160616.md deleted file mode 100644 index 00466167097..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160616.md +++ /dev/null @@ -1,46 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Deprecation Notice

- -- Integers with a leading zero (e.g., `0755`) are currently treated as octal. In a future release, leading zeros will be ignored and the numbers will be treated as decimal. [#7205](https://github.com/cockroachdb/cockroach/pull/7205) - -

New Features

- -- `SELECT` statements now accept an `AS OF SYSTEM TIME` clause to read values from the recent past (up to the GC policy set in the [zone configuration](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones)). [#7139](https://github.com/cockroachdb/cockroach/pull/7139) -- [Hexadecimal byte string literals](https://www.cockroachlabs.com/docs/v1.0/string) of the form `x'f00d'` can now be used to create byte strings containing arbitrary characters. [#7138](https://github.com/cockroachdb/cockroach/pull/7138) -- `BIGSERIAL` and `SMALLSERIAL` are now supported as aliases for [`SERIAL`](https://www.cockroachlabs.com/docs/v1.0/serial). All three types use the same 64-bit value size. [#7187](https://github.com/cockroachdb/cockroach/pull/7187) - -
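A minimal sketch of `AS OF SYSTEM TIME` and a hexadecimal byte string literal; the `accounts` table and the timestamp are hypothetical, and the timestamp must fall within the configured GC window:

```sql
-- Read a table as it was at a recent point in the past.
SELECT * FROM accounts AS OF SYSTEM TIME '2016-06-15 12:00:00';

-- Hexadecimal byte string literal containing arbitrary bytes.
SELECT x'f00d';
```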

Bug Fixes

- -- Fixed a panic in some uses of subqueries. [#6994](https://github.com/cockroachdb/cockroach/pull/6994), [#7146](https://github.com/cockroachdb/cockroach/pull/7146) -- [`ALTER TABLE ADD COLUMN`](https://www.cockroachlabs.com/docs/v1.0/alter-table) now allows the addition of columns with a uniqueness constraint only when the table is empty. [#7094](https://github.com/cockroachdb/cockroach/pull/7094) -- Improved availability when [`--raft-tick-interval`](https://www.cockroachlabs.com/docs/v1.0/start-a-node#flags) is increased from its default value. [#7086](https://github.com/cockroachdb/cockroach/pull/7086) -- Improved reporting of errors from single statements run outside transactions. [#7080](https://github.com/cockroachdb/cockroach/pull/7080) -- Invalid octal literals (e.g., `09`) no longer crash the server. [#7134](https://github.com/cockroachdb/cockroach/pull/7134) -- When [`ALTER TABLE`](https://www.cockroachlabs.com/docs/v1.0/alter-table) fails, it now rolls back dependent items in the correct order. [#6789](https://github.com/cockroachdb/cockroach/pull/6789) -- [Aggregate functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions) like `SUM()` are no longer allowed in `CHECK` or `DEFAULT` expressions. [#7221](https://github.com/cockroachdb/cockroach/pull/7221) - -

Performance Improvements

- -- Improved rate limiting of snapshots during rebalancing, reducing memory usage and improving availability. [#6878](https://github.com/cockroachdb/cockroach/pull/6878) -- Splitting a range that has grown beyond the target size is now faster. [#7118](https://github.com/cockroachdb/cockroach/pull/7118) -- Raft elections are now initiated lazily instead of shortly after server startup. [#7085](https://github.com/cockroachdb/cockroach/pull/7085) -- The raft log is now garbage-collected more eagerly. [#7125](https://github.com/cockroachdb/cockroach/pull/7125) -- Increased RocksDB block size, which reduces memory usage. [#7219](https://github.com/cockroachdb/cockroach/pull/7219) - -

Doc Updates

- -- Docs on the [Built-in SQL client](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) now cover pretty-printing to the standard output when executing SQL from the command line. The examples have been improved as well. [#373](https://github.com/cockroachdb/docs/pull/373) -- Docs on the [`STRING`](https://www.cockroachlabs.com/docs/v1.0/string) and [`INT`](https://www.cockroachlabs.com/docs/v1.0/int) data types now cover hexadecimal-encoded literal format. Also, the `STRING` docs now cover escape string format. [#392](https://github.com/cockroachdb/docs/pull/392) -- On [Functions and Operators](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators), functions are now identified as "built-in" or "aggregate". Also, function categorization has been improved. [#379](https://github.com/cockroachdb/docs/pull/379), [#387](https://github.com/cockroachdb/docs/pull/387) -- On [`CREATE INDEX`](https://www.cockroachlabs.com/docs/v1.0/create-index), the syntax diagram has been clarified and expanded. [#382](https://github.com/cockroachdb/docs/pull/382) - -

Contributors

- -This release includes 80 merged PRs by 19 authors. We would like to -thank the following contributors from the CockroachDB community: - -- Kenji Kaneda -- Paul Steffensen diff --git a/src/current/_includes/releases/v1.0/beta-20160629.md b/src/current/_includes/releases/v1.0/beta-20160629.md deleted file mode 100644 index a95d3deda63..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160629.md +++ /dev/null @@ -1,56 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

New Features

- -- A prototype implementation of `JOIN` (non-optimized) is now available. [#7202](https://github.com/cockroachdb/cockroach/pull/7202) -- [Column Families](https://www.cockroachlabs.com/docs/v1.0/column-families) are a new, more efficient representation of SQL tables. Each column family is a group of columns in a table that are stored as a single underlying key-value pair. New tables created with multi-column families will not be compatible with versions of CockroachDB earlier than `beta-20160629`. However, no migration of existing tables is necessary; the previous format corresponds to a separate column family for each column in the table. [#7466](https://github.com/cockroachdb/cockroach/pull/7466), [#7408](https://github.com/cockroachdb/cockroach/pull/7408), [RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20151214_sql_column_families.md) -- [`EXPLAIN`](https://www.cockroachlabs.com/docs/v1.0/explain) can now be used with `CREATE`, `DROP`, and `ALTER` statements. [#7269](https://github.com/cockroachdb/cockroach/pull/7269) -- The [built-in SQL client](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) now prints tab-separated values instead of ASCII-art tables when `stdout` is not a TTY (unless `--pretty` is used). [#7268](https://github.com/cockroachdb/cockroach/pull/7268) -- In interactive mode, the [built-in SQL client](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) now prints the number of rows at the end of a result set. [#7266](https://github.com/cockroachdb/cockroach/pull/7266) -- Prepared statements can now be deallocated with the `DEALLOCATE` command. [#7367](https://github.com/cockroachdb/cockroach/pull/7367) -- Added support for interval types in placeholders. [#7382](https://github.com/cockroachdb/cockroach/pull/7382) -- Added support for hexadecimal-encoded [`STRING`](https://www.cockroachlabs.com/docs/v1.0/string) literals. [#7138](https://github.com/cockroachdb/cockroach/pull/7138) - -
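A minimal sketch of grouping columns into families with the `FAMILY` clause, as documented for v1.0 column families; the table and family names are hypothetical:

```sql
-- Columns in the same family are stored as a single underlying key-value pair.
CREATE TABLE orders (
    id       INT PRIMARY KEY,
    customer STRING,
    status   STRING,
    notes    STRING,
    FAMILY f_hot  (id, customer, status),  -- columns frequently read together
    FAMILY f_cold (notes)                  -- large, rarely read column
);

-- EXPLAIN can now be applied to CREATE, DROP, and ALTER statements.
EXPLAIN CREATE TABLE t (x INT);
```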

Performance Improvements

- -- The load balancing system now operates at a steadier pace, reducing spikes in memory usage and reaching equilibrium more quickly. [#7147](https://github.com/cockroachdb/cockroach/pull/7147) -- The block cache is now shared across stores on the same node. [#7496](https://github.com/cockroachdb/cockroach/pull/7496) -- Initial replication in a new cluster is now significantly faster. [#7355](https://github.com/cockroachdb/cockroach/pull/7355) -- The Raft log is now more aggressively truncated. [#7125](https://github.com/cockroachdb/cockroach/pull/7125) -- The RocksDB block-size is now set to a more reasonable value. [#7276](https://github.com/cockroachdb/cockroach/pull/7276) -- A new reservation system now ensures that there is enough free space and not too many existing reservations before trying to replicate a range to a new store. This stops the thundering herd that can occur when adding a new node to a cluster. [#7147](https://github.com/cockroachdb/cockroach/pull/7147) -- Miscellaneous performance improvements in the underlying data distribution and replication protocol. - -

Bug Fixes

- -- Fixed a bug that could cause a server crash on startup. [#7447](https://github.com/cockroachdb/cockroach/pull/7447) -- Improved the handling of `NULL` values in arithmetic and comparison operations. [#7341](https://github.com/cockroachdb/cockroach/pull/7341) -- Fixed a crash when session arguments could not be parsed. [#7231](https://github.com/cockroachdb/cockroach/pull/7231) -- Improved error messages for parts of the PostgreSQL protocol we do not support. [#7233](https://github.com/cockroachdb/cockroach/pull/7233) -- `AS OF SYSTEM TIME` can now be used in prepared statements. [#7251](https://github.com/cockroachdb/cockroach/pull/7251) -- Raft messages are no longer canceled due to unrelated errors. [#7252](https://github.com/cockroachdb/cockroach/pull/7252) -- [Constraint](https://www.cockroachlabs.com/docs/v1.0/constraints) names that are specified at the column level are now preserved. [#7271](https://github.com/cockroachdb/cockroach/pull/7271) -- When [`COMMIT`](https://www.cockroachlabs.com/docs/v1.0/commit-transaction) returns an error, the transaction is considered closed and a separate `ROLLBACK` is no longer necessary. [#7282](https://github.com/cockroachdb/cockroach/pull/7282) -- The [built-in SQL client](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) now escapes strings in a format that the SQL parser will accept. [#7294](https://github.com/cockroachdb/cockroach/pull/7294) -- Fixed issues when two snapshots were being sent simultaneously. [#7299](https://github.com/cockroachdb/cockroach/pull/7299) -- When a column is [renamed](https://www.cockroachlabs.com/docs/v1.0/rename-column), any `CHECK` constraints referring to that column are now updated. [#7311](https://github.com/cockroachdb/cockroach/pull/7311) -- When piping commands into the [built-in SQL client](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client), the last line was previously ignored. Now it will be executed if it ends with a semicolon, or report an error if it is non-empty but not a complete statement. [#7328](https://github.com/cockroachdb/cockroach/pull/7328) -- When [`cockroach quit`](https://www.cockroachlabs.com/docs/v1.0/stop-a-node) fails to drain a node, it is now forced to quit. [#7483](https://github.com/cockroachdb/cockroach/pull/7483) -- Fixed a case in which a removed replica could prevent the rebalance queue from making progress. [#7507](https://github.com/cockroachdb/cockroach/pull/7507) -- Fixed a bug that slowed down population of new replicas. [#7252](https://github.com/cockroachdb/cockroach/pull/7252) - -

Internal Changes

- -- The Admin UI has been rewritten in a new framework. [#7242](https://github.com/cockroachdb/cockroach/pull/7242) -- The Admin UI now uses serialized protocol buffers when communicating with CockroachDB servers. [#7178](https://github.com/cockroachdb/cockroach/pull/7178), [#7242](https://github.com/cockroachdb/cockroach/pull/7242) - -

Contributors

- -This release includes 160 merged PRs by 22 authors. We would like to -thank the following contributors from the CockroachDB community, especially first-time contributor [phynalle](https://github.com/cockroachdb/cockroach/pull/7361): - -- Jingguo Yao -- Kenji Kaneda -- phynalle diff --git a/src/current/_includes/releases/v1.0/beta-20160714.md b/src/current/_includes/releases/v1.0/beta-20160714.md deleted file mode 100644 index 9c0381d81e3..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160714.md +++ /dev/null @@ -1,67 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Upgrade Notes

- -- This release cannot be run concurrently with older beta releases. Please stop all nodes running older releases before restarting any node with this version. -- After running this release, it is impossible to downgrade to any release older than [`beta-20160629`](#beta-20160629). -- This release uses more open file descriptors than older releases. It is recommended to set the process's file descriptor limit to at least 10000. See [Recommended Production Settings](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings) for more details. - -

New Features

- -- The `cockroach dump` command can now be used to back up the contents of a table. The output of `dump` is a series of SQL statements that can be used to recreate the table. [#7511](https://github.com/cockroachdb/cockroach/pull/7511) -- The `ILIKE` [operator](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) for case-insensitive matching is now supported. [#7635](https://github.com/cockroachdb/cockroach/pull/7635) -- Four new [operators](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) for regular expression matches are now supported: `~` (regex match), `!~` (negated regex match), `~*` (case-insensitive regex match), `!~*` (negated case-insensitive regex match); see the sketch after this list. [#7686](https://github.com/cockroachdb/cockroach/pull/7686) -- The new `SHOW CONSTRAINTS` statement can be used to show the constraints on a table. [#7584](https://github.com/cockroachdb/cockroach/pull/7584) -- `FOREIGN KEY` constraints can now have names. [#7627](https://github.com/cockroachdb/cockroach/pull/7627) -- The [`--http-addr`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) flag can be used to control the network interface used for the web UI. [#7475](https://github.com/cockroachdb/cockroach/pull/7475) -- [`ALTER TABLE ADD COLUMN`](https://www.cockroachlabs.com/docs/v1.0/alter-table) can now be used to create [column families](https://www.cockroachlabs.com/docs/v1.0/column-families). [#7711](https://github.com/cockroachdb/cockroach/pull/7711) -- CockroachDB can now be built on FreeBSD. See this [blog post](https://www.cockroachlabs.com/blog/critters-in-a-jar-running-cockroachdb-in-a-freebsd-jail/) for details. [#7545](https://github.com/cockroachdb/cockroach/pull/7545) -- The `uidebug` `Makefile` target, used for development of the web UI, has been replaced with the environment variable `COCKROACH_DEBUG_UI=1`. [#7601](https://github.com/cockroachdb/cockroach/pull/7601) - -
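A minimal sketch of `ILIKE`, the new regular-expression operators, and `SHOW CONSTRAINTS`, using a hypothetical `users` table:

```sql
-- Case-insensitive pattern matching and regular-expression matching.
SELECT * FROM users WHERE name ILIKE 'al%';   -- matches 'Alice', 'alex', ...
SELECT * FROM users WHERE name ~  '^A';       -- regex match
SELECT * FROM users WHERE name ~* '^a';       -- case-insensitive regex match
SELECT * FROM users WHERE name !~ 'z$';       -- negated regex match

-- List the constraints defined on a table.
SHOW CONSTRAINTS FROM users;
```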

Performance Improvements

- -- [Column families](https://www.cockroachlabs.com/docs/v1.0/column-families) are now used by default, improving performance of all tables created in this release and beyond. [#7623](https://github.com/cockroachdb/cockroach/pull/7623) -- Removed replicas are now garbage collected more quickly. [#7533](https://github.com/cockroachdb/cockroach/pull/7533) -- RocksDB is now configured to use more, smaller files. This improves performance by reducing the amount of data rewritten during compactions, but increases the number of open file descriptors used by the process. [#7532](https://github.com/cockroachdb/cockroach/pull/7532) -- The server now increases its soft file descriptor limit if it is too low and this is allowed by the hard limit. [#7747](https://github.com/cockroachdb/cockroach/pull/7747) -- The raft log can now be truncated even if a replica is behind, reducing the size of snapshots that must be transmitted over the network (but somewhat increasing the likelihood that a snapshot will be needed instead of replaying the log). [#7438](https://github.com/cockroachdb/cockroach/pull/7438) -- Raft-related messages are now sent in separate streams for each range. [#7534](https://github.com/cockroachdb/cockroach/pull/7534) -- Raft commands for queries that have been abandoned by the client are no longer retried internally. [#7605](https://github.com/cockroachdb/cockroach/pull/7605) -- Table leases are now released when connections are closed, allowing schema changes to proceed without waiting for leases to expire. [#7661](https://github.com/cockroachdb/cockroach/pull/7661) - -

Bug Fixes

- -- The [command-line SQL client](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) is now smarter about semicolons and `\` commands inside string literals. [#7510](https://github.com/cockroachdb/cockroach/pull/7510) -- `LIMIT` is now applied correctly on queries that use `JOIN`. [#7546](https://github.com/cockroachdb/cockroach/pull/7546) -- The "CGo Calls" graph in the web UI is now displayed as a rate instead of a cumulative total. [#7597](https://github.com/cockroachdb/cockroach/pull/7597) -- The [`cockroach quit`](https://www.cockroachlabs.com/docs/v1.0/stop-a-node) command now waits for the server to stop before returning. [#7603](https://github.com/cockroachdb/cockroach/pull/7603) -- Building CockroachDB in a Docker container on Linux with `build/builder.sh` no longer requires the container and host toolchains to be the same. [#7626](https://github.com/cockroachdb/cockroach/pull/7626) -- Fixed a deadlock in table lease acquisition. [#7504](https://github.com/cockroachdb/cockroach/pull/7504) -- The [`TIMESTAMP WITH TIME ZONE`](https://www.cockroachlabs.com/docs/v1.0/timestamp) type is now reported correctly in the network protocol. [#7642](https://github.com/cockroachdb/cockroach/pull/7642) -- [Constraint](https://www.cockroachlabs.com/docs/v1.0/constraints) names are now required to be unique. [#7629](https://github.com/cockroachdb/cockroach/pull/7629) -- The "Events" tab in the web UI now includes events for schema changes. [#7571](https://github.com/cockroachdb/cockroach/pull/7571) -- Fixed several server panics in expression normalization. [#7512](https://github.com/cockroachdb/cockroach/pull/7512) -- Fixed a data race when a transaction is abandoned by the client. [#7738](https://github.com/cockroachdb/cockroach/pull/7738) -- Fixed a scenario in which intents could not be resolved. [#7744](https://github.com/cockroachdb/cockroach/pull/7744) -- RocksDB is no longer allowed to use all available file descriptors. [#7747](https://github.com/cockroachdb/cockroach/pull/7747) -- Using `*` as an argument to functions other than `COUNT(*)` no longer causes the server to panic. [#7751](https://github.com/cockroachdb/cockroach/pull/7751) -- Fixed a deadlock during shutdown. [#7770](https://github.com/cockroachdb/cockroach/pull/7770) - -

Doc Updates

- -- Docs on [installing CockroachDB in Docker](https://www.cockroachlabs.com/docs/v1.0/install-cockroachdb) now recommend using the new Docker applications for Mac and Windows. -- The new [Start a Cluster in Docker](https://www.cockroachlabs.com/docs/v1.0/start-a-local-cluster-in-docker) page demonstrates how to run a multi-node cluster across multiple Docker containers on a single host, using Docker volumes to persist node data. -- Docs on the [`DELETE`](https://www.cockroachlabs.com/docs/v1.0/delete) statement are now available. - -

Contributors

- -This release includes 131 merged PRs by 25 authors. We would like to -thank the following contributors from the CockroachDB community, especially first-time contributor [songhao](https://github.com/cockroachdb/cockroach/pull/7692): - -- Jason E. Aten -- Jingguo Yao -- Kenji Kaneda -- Sean Loiselle -- songhao diff --git a/src/current/_includes/releases/v1.0/beta-20160721.md b/src/current/_includes/releases/v1.0/beta-20160721.md deleted file mode 100644 index e437c0b79f5..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160721.md +++ /dev/null @@ -1,45 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

New Features

- -- Metrics are now exported on `/_status/vars` in a format suitable for aggregation by Prometheus. [#7895](https://github.com/cockroachdb/cockroach/pull/7895) - -

Build Changes

- -- Go 1.6.3 and 1.7rc2 are now supported. [#7811](https://github.com/cockroachdb/cockroach/pull/7811) -- The versions of Docker built for Mac and Windows are now supported. Users of a `docker-machine` VM may need to set environment variables by hand as this case is no longer detected automatically. [#7820](https://github.com/cockroachdb/cockroach/pull/7820) - -

UI Changes

- -- The Admin UI now displays a warning when there are fewer than three nodes. [#7783](https://github.com/cockroachdb/cockroach/pull/7783) - -

Performance Improvements

- -- Writes to different ranges are now performed in parallel. [#7860](https://github.com/cockroachdb/cockroach/pull/7860) -- The first range descriptor is kept more up-to-date. [#7766](https://github.com/cockroachdb/cockroach/pull/7766) -- Ranges are now considered for splits after any replication or rebalancing change. [#7800](https://github.com/cockroachdb/cockroach/pull/7800) -- An existing table lease can now be reused without writing to the lease table as long as it has enough time left before expiration. [#7781](https://github.com/cockroachdb/cockroach/pull/7781) -- The rebalancing system now avoids moving the current leader of a range. [#7918](https://github.com/cockroachdb/cockroach/pull/7918) -- Transaction records related to splits are now garbage-collected promptly. [#7903](https://github.com/cockroachdb/cockroach/pull/7903) - -

Bug Fixes

- -- Command history works again in the [command-line SQL interface](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client). [#7818](https://github.com/cockroachdb/cockroach/pull/7818) -- The [`cockroach dump`](https://www.cockroachlabs.com/docs/v1.0/sql-dump) command now works with tables that have [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) columns that specify a precision and scale. [#7842](https://github.com/cockroachdb/cockroach/pull/7842) -- Fixed several panics when handling invalid SQL commands. [#7867](https://github.com/cockroachdb/cockroach/pull/7867) [#7868](https://github.com/cockroachdb/cockroach/pull/7868) -- [`ALTER TABLE ADD COLUMN`](https://www.cockroachlabs.com/docs/v1.0/alter-table) now supports the `IF NOT EXISTS` modifier. [#7898](https://github.com/cockroachdb/cockroach/pull/7898) -- Fixed a race in gossip status logging. [#7836](https://github.com/cockroachdb/cockroach/pull/7836) - -

Doc Updates

- -- Feedback can now be submitted from the bottom of any page of our docs. [#467](https://github.com/cockroachdb/docs/pull/467) -- Updated [Recommended Production Settings](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings) to provide OS-specific instructions for increasing the file descriptors limit. [#459](https://github.com/cockroachdb/docs/pull/459) -- Updated [INTERVAL](https://www.cockroachlabs.com/docs/v1.0/interval) to cover all supported interval formats: Golang, Traditional Postgres, and ISO 8601. [#462](https://github.com/cockroachdb/docs/pull/462) -- Updated [Start a Cluster in Docker](https://www.cockroachlabs.com/docs/v1.0/start-a-local-cluster-in-docker) to work for Docker on Windows. [#457](https://github.com/cockroachdb/docs/pull/457) - -

Contributors

- -This release includes 76 merged PRs by 21 authors. We would especially like to -thank first-time contributors [Christian Meunier](https://github.com/cockroachdb/cockroach/pull/7937) and [Dharmesh Kakadia](https://github.com/dharmeshkakadia) from the CockroachDB community. diff --git a/src/current/_includes/releases/v1.0/beta-20160728.md b/src/current/_includes/releases/v1.0/beta-20160728.md deleted file mode 100644 index 784e6c28824..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160728.md +++ /dev/null @@ -1,37 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

New Features

- -- Foreign keys can now reference multiple columns. [#8033](https://github.com/cockroachdb/cockroach/pull/8033) -- The [`dump`](https://www.cockroachlabs.com/docs/v1.0/sql-dump) command can now be run by any user with `SELECT` privilege on the table. [#7974](https://github.com/cockroachdb/cockroach/pull/7974) -- [`INTEGER`](https://www.cockroachlabs.com/docs/v1.0/int) and [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) types can now be mixed in arithmetic expressions without casts. [#7756](https://github.com/cockroachdb/cockroach/pull/7756) -- The [`--join`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) flag can now be specified multiple times, as an alternative to specifying a comma-separated list. [#7876](https://github.com/cockroachdb/cockroach/pull/7876) - -
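A minimal sketch of a foreign key that references multiple columns; the table and column names are hypothetical:

```sql
-- A foreign key may now reference multiple columns; the referenced columns
-- are covered by the parent's primary key.
CREATE TABLE parents (
    region STRING,
    id     INT,
    PRIMARY KEY (region, id)
);

CREATE TABLE children (
    child_id  INT PRIMARY KEY,
    region    STRING,
    parent_id INT,
    INDEX (region, parent_id),  -- secondary index covering the referencing columns
    FOREIGN KEY (region, parent_id) REFERENCES parents (region, id)
);
```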

Bug Fixes

- -- Range leases are now preserved across splits, fixing a source of inconsistent reads. [#7955](https://github.com/cockroachdb/cockroach/pull/7955) -- Fixed a panic when single-statement [transactions](https://www.cockroachlabs.com/docs/v1.0/transactions) were aborted after an automatic retry. [#8010](https://github.com/cockroachdb/cockroach/pull/8010) -- Parsing of time zone offsets is now more lenient, improving compatibility with JDBC drivers. [#7929](https://github.com/cockroachdb/cockroach/pull/7929) -- Unbounded columns are now allowed in empty [column families](https://www.cockroachlabs.com/docs/v1.0/column-families). [#7969](https://github.com/cockroachdb/cockroach/pull/7969) -- The target columns of [`INSERT`](https://www.cockroachlabs.com/docs/v1.0/insert) and [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements are now required to be given in unqualified form. [#7911](https://github.com/cockroachdb/cockroach/pull/7911) -- Fixed a potential deadlock in the gossip subsystem and SQL leases. [#8011](https://github.com/cockroachdb/cockroach/pull/8011), [#8019](https://github.com/cockroachdb/cockroach/pull/8019) - -

Performance Improvements

- -- Snapshots are now sent prior to the Raft configuration change, minimizing the window of reduced availability. [#7833](https://github.com/cockroachdb/cockroach/pull/7833) - -

Doc Updates

- -- Added docs on the [`cockroach dump`](https://www.cockroachlabs.com/docs/v1.0/sql-dump) command. [#472](https://github.com/cockroachdb/docs/pull/472) -- Updated [Recommended Production Settings](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings) to clarify how CockroachDB allocates file descriptors when the limit is under the recommended amount. [#480](https://github.com/cockroachdb/docs/pull/480) -- [SQL statements](https://www.cockroachlabs.com/docs/v1.0/sql-statements), [data types](https://www.cockroachlabs.com/docs/v1.0/data-types), and data definition topics are now available at-a-glance in the sidebar. [#483](https://github.com/cockroachdb/docs/pull/483) - -
- -

Contributors

- -This release includes 63 merged PRs by 17 authors. We would like to thank first-time contributor [Rushi Agrawal](https://github.com/cockroachdb/cockroach/pull/7876) from the CockroachDB community. - -
diff --git a/src/current/_includes/releases/v1.0/beta-20160829.md b/src/current/_includes/releases/v1.0/beta-20160829.md deleted file mode 100644 index 0d9b73b70c7..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20160829.md +++ /dev/null @@ -1,78 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

General Changes

- -- CockroachDB now uses Go 1.7. [#8579](https://github.com/cockroachdb/cockroach/pull/8579) -- CockroachDB now uses RocksDB 4.9. [#8815](https://github.com/cockroachdb/cockroach/pull/8815) - -

Command-Line Interface Changes

- -- The [`cockroach zone set`](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) command no longer takes its input directly on the command line. Instead, it accepts a `--file` flag to read from a file, or `--file=-` to read from standard input. [#7953](https://github.com/cockroachdb/cockroach/pull/7953) - -

New Features

- -- `AS OF SYSTEM TIME` can now be used with `JOIN` queries. [#8198](https://github.com/cockroachdb/cockroach/pull/8198) -- The type `BIT` now works correctly (as a shorthand for `BIT(1)`). The length limit is now enforced. [#8326](https://github.com/cockroachdb/cockroach/pull/8326) -- The `SHOW` commands now only show tables that the current user has permission to access. [#8070](https://github.com/cockroachdb/cockroach/pull/8070) -- [Foreign keys](https://www.cockroachlabs.com/docs/v1.0/foreign-key) can now use a prefix of an index. [#8059](https://github.com/cockroachdb/cockroach/pull/8059) -- The standard `information_schema` database is now partially supported. New tables will be added to this database in future beta releases. [#7965](https://github.com/cockroachdb/cockroach/pull/7965), [#8119](https://github.com/cockroachdb/cockroach/pull/8119) - -
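A minimal sketch of querying the partially supported `information_schema` database and of the permission-aware `SHOW` commands; the `test` database name is hypothetical, and the exact set of `information_schema` tables available depends on the beta in use:

```sql
-- Standard catalog tables can be queried directly
-- (assuming the tables table is among those already implemented).
SELECT table_name FROM information_schema.tables;

-- SHOW now lists only objects the current user has permission to access.
SHOW TABLES FROM test;
```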

Bug Fixes

- -- Clusters with a large number of ranges no longer experience persistently broken connections on node restarts. [#8828](https://github.com/cockroachdb/cockroach/pull/8828) -- The `RENAME` command now requires the `DROP` privilege on the table or database. It is no longer possible to rename the `system` database. [#7998](https://github.com/cockroachdb/cockroach/pull/7998) -- The `repeat()` and `substr()` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) with very large numeric arguments will no longer crash the server as easily. [#8073](https://github.com/cockroachdb/cockroach/pull/8073), [#8078](https://github.com/cockroachdb/cockroach/pull/8078) -- Certain errors now cause a replica to be marked as corrupt so the corruption will not be replicated to other nodes. [#7684](https://github.com/cockroachdb/cockroach/pull/7684) -- A `CloseComplete` packet is now sent in response to a `Close` command, improving compatibility with some PostgreSQL client drivers (including the Elixir driver). [#8309](https://github.com/cockroachdb/cockroach/pull/8309) -- SQL name parsing has been improved to fix several panics and allow foreign key constraints that span databases. [#8152](https://github.com/cockroachdb/cockroach/pull/8152) -- Fixed some panics. [#8283](https://github.com/cockroachdb/cockroach/pull/8283), [#8282](https://github.com/cockroachdb/cockroach/pull/8282) -- Fixed a deadlock if a range grew too large. [#8387](https://github.com/cockroachdb/cockroach/pull/8387) -- Fixed a race in which multiple conflicting snapshots could be accepted at the same time. [#8365](https://github.com/cockroachdb/cockroach/pull/8365) -- Decimal values are now represented correctly in the binary protocol. [#8319](https://github.com/cockroachdb/cockroach/pull/8319) - -

Performance Improvements

- -- Snapshots are now sent synchronously during replica changes. This controls the rate of the replication process and prevents spikes in memory usage that often caused servers to crash. [#8613](https://github.com/cockroachdb/cockroach/pull/8613) -- Raft logs are now truncated less aggressively, reducing the chance that replication will need to send a snapshot instead of the log. [#8343](https://github.com/cockroachdb/cockroach/pull/8343), [#8629](https://github.com/cockroachdb/cockroach/pull/8629), [#8656](https://github.com/cockroachdb/cockroach/pull/8656) -- Snapshots and the raft log are now written more efficiently. [#8644](https://github.com/cockroachdb/cockroach/pull/8644) -- Raft log entries are now cached. [#8494](https://github.com/cockroachdb/cockroach/pull/8494) -- Raft groups are now created lazily at startup. [#8592](https://github.com/cockroachdb/cockroach/pull/8592) -- Raft heartbeats are now sent less often by default. [#8695](https://github.com/cockroachdb/cockroach/pull/8695) -- Improved performance and reliability of clusters with large numbers of ranges. [#8125](https://github.com/cockroachdb/cockroach/pull/8125), [#8162](https://github.com/cockroachdb/cockroach/pull/8162), [#8495](https://github.com/cockroachdb/cockroach/pull/8495) -- The heuristics used by the rebalancing system have been improved. [#8124](https://github.com/cockroachdb/cockroach/pull/8124) -- Some noisy log messages have been removed or reduced. [#8071](https://github.com/cockroachdb/cockroach/pull/8071), [#8021](https://github.com/cockroachdb/cockroach/pull/8021), [#8240](https://github.com/cockroachdb/cockroach/pull/8240), [#8292](https://github.com/cockroachdb/cockroach/pull/8292), [#8529](https://github.com/cockroachdb/cockroach/pull/8529), [#8687](https://github.com/cockroachdb/cockroach/pull/8687), [#8689](https://github.com/cockroachdb/cockroach/pull/8689) -- The gossip network reconnects more reliably after a failure. [#8128](https://github.com/cockroachdb/cockroach/pull/8128) -- RPC connections to failed nodes are now detected sooner. [#8163](https://github.com/cockroachdb/cockroach/pull/8163) -- The cache of the first range descriptor is now properly invalidated. [#8163](https://github.com/cockroachdb/cockroach/pull/8163) -- Removed replicas are now garbage-collected sooner in many cases. [#8172](https://github.com/cockroachdb/cockroach/pull/8172) -- The resolution of the block profiler has been reduced, saving CPU. [#8384](https://github.com/cockroachdb/cockroach/pull/8384) -- Range lease extensions no longer block concurrent reads. [#8352](https://github.com/cockroachdb/cockroach/pull/8352) - -

UI Changes

- -- The "capacity used" is now computed correctly. [#8048](https://github.com/cockroachdb/cockroach/pull/8048) -- The CPU and garbage-collection graphs now display averages. [#8048](https://github.com/cockroachdb/cockroach/pull/8048) -- The Databases section now includes more details. [#8364](https://github.com/cockroachdb/cockroach/pull/8364) - -

Doc Updates

- The new [SQL Feature Support](https://www.cockroachlabs.com/docs/v1.0/sql-feature-support) page explains which standard SQL features are supported by CockroachDB. [#550](https://github.com/cockroachdb/docs/pull/550)
- The new Features on Develop Branch page tracks features that are available on the `develop` branch but not yet on `master`. [#552](https://github.com/cockroachdb/docs/pull/552) (Page no longer available.)
- Added docs on [foreign key constraints](https://www.cockroachlabs.com/docs/v1.0/foreign-key). [#528](https://github.com/cockroachdb/docs/pull/528) (A brief example combining foreign keys and `TRUNCATE` follows this list.)
- Added docs on the [`TRUNCATE`](https://www.cockroachlabs.com/docs/v1.0/truncate) statement. [#542](https://github.com/cockroachdb/docs/pull/542)
- Updated [Recommended Production Settings](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings) to include cache size recommendations for machines running multiple applications and `systemd`-specific instructions for increasing the file descriptors limit. [#532](https://github.com/cockroachdb/docs/pull/532), [#554](https://github.com/cockroachdb/docs/pull/554)
- Fixed errors in the commands for [starting CockroachDB in Docker](https://www.cockroachlabs.com/docs/v1.0/start-a-local-cluster-in-docker). [#567](https://github.com/cockroachdb/docs/pull/567)
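As a quick illustration of the two SQL features those new docs cover, here is a minimal sketch using hypothetical `customers` and `orders` tables (the table names and columns are assumptions, not taken from the docs):

```sql
-- Hypothetical schema with a FOREIGN KEY constraint.
CREATE TABLE customers (
    id INT PRIMARY KEY,
    name STRING
);

CREATE TABLE orders (
    id INT PRIMARY KEY,
    customer_id INT REFERENCES customers (id),  -- foreign key constraint
    total DECIMAL
);

-- TRUNCATE removes all rows from a table.
TRUNCATE orders;
```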

Contributors

This release includes 280 merged PRs by 26 authors. We would like to thank the following contributors from the CockroachDB community:

- Christian Koep
- Dolf Schimmel
- songhao
diff --git a/src/current/_includes/releases/v1.0/beta-20160908.md b/src/current/_includes/releases/v1.0/beta-20160908.md
deleted file mode 100644
index 713baab2dd6..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20160908.md
+++ /dev/null
@@ -1,50 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

General Changes

- CockroachDB has reverted to RocksDB 4.8. [#9118](https://github.com/cockroachdb/cockroach/pull/9118)
- The logging format has changed to include a goroutine ID. [#8977](https://github.com/cockroachdb/cockroach/pull/8977)
- CockroachDB now builds correctly when Git worktrees are used; as a consequence, Git version 2.5 or newer is now required when [building from source](https://www.cockroachlabs.com/docs/v1.0/install-cockroachdb). [#9072](https://github.com/cockroachdb/cockroach/pull/9072)

Command-Line Interface Changes

- The `--from` and `--to` arguments of debugging commands now support backslash-escaped keys. [#8903](https://github.com/cockroachdb/cockroach/pull/8903)
- If any [environment variables](https://www.cockroachlabs.com/docs/v1.0/cockroach-commands#environment-variables) are used to configure the server, the names of those variables are printed to the logs. [#9069](https://github.com/cockroachdb/cockroach/pull/9069)

Bug Fixes

- Fixed a bug that would result in the server locking up after a few hours on a machine or VM with only one CPU. [#8908](https://github.com/cockroachdb/cockroach/pull/8908)
- Fixed another cause of persistently broken connections on node restarts. [#8947](https://github.com/cockroachdb/cockroach/pull/8947)
- [`CREATE TABLE`](https://www.cockroachlabs.com/docs/v1.0/create-table) now reports errors correctly instead of failing silently. [#9011](https://github.com/cockroachdb/cockroach/pull/9011)
- The replica garbage collection process is no longer confused by uninitialized replicas. [#9021](https://github.com/cockroachdb/cockroach/pull/9021)
- Fixed various data races. [#8865](https://github.com/cockroachdb/cockroach/pull/8865), [#8933](https://github.com/cockroachdb/cockroach/pull/8933)
- Fixed a panic in the time-series query system. [#9038](https://github.com/cockroachdb/cockroach/pull/9038)

Performance Improvements

- Internal locking mechanisms have been refactored to improve parallelism. [#8941](https://github.com/cockroachdb/cockroach/pull/8941)
- GRPC request tracing has been disabled due to its memory cost. [#9113](https://github.com/cockroachdb/cockroach/pull/9113)
- Raft leadership is more reliably transferred to coincide with the range lease. [#8932](https://github.com/cockroachdb/cockroach/pull/8932)
- Raft snapshots are now limited to one at a time on both the receiving and sending nodes. [#8974](https://github.com/cockroachdb/cockroach/pull/8974)
- Node startup time has been improved. [#9020](https://github.com/cockroachdb/cockroach/pull/9020)
- Reduced memory usage of the timestamp cache. [#9102](https://github.com/cockroachdb/cockroach/pull/9102)
- Old replicas are now garbage-collected with higher priority. [#9019](https://github.com/cockroachdb/cockroach/pull/9019)
- Transaction records for splits and replica changes are now garbage-collected sooner. [#9036](https://github.com/cockroachdb/cockroach/pull/9036)

Doc Updates

- Added docs on [deploying CockroachDB on Google Cloud Platform (GCE)](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-google-cloud-platform). [#574](https://github.com/cockroachdb/docs/pull/574)
- Added docs on [orchestrating CockroachDB with Kubernetes](https://www.cockroachlabs.com/docs/v1.0/orchestrate-cockroachdb-with-kubernetes). [#584](https://github.com/cockroachdb/docs/pull/584)
- Updated [troubleshooting](https://www.cockroachlabs.com/docs/v1.0/troubleshoot) docs to cover cases when nodes will not join a cluster. [#610](https://github.com/cockroachdb/docs/pull/610)
- Each version's release notes now link to the corresponding Mac and Linux binaries. [#604](https://github.com/cockroachdb/docs/pull/604)
- Updated docs on [secure local](https://www.cockroachlabs.com/docs/v1.0/secure-a-cluster) and [secure distributed](https://www.cockroachlabs.com/docs/v1.0/manual-deployment) deployment to show how to stop nodes. [#619](https://github.com/cockroachdb/docs/pull/619)

Contributors

This release includes 180 merged PRs by 17 authors. We would like to thank first-time contributor [Henry Escobar](https://github.com/HenryEscobar) from the CockroachDB community.
diff --git a/src/current/_includes/releases/v1.0/beta-20160915.md b/src/current/_includes/releases/v1.0/beta-20160915.md
deleted file mode 100644
index ccd29e30b2b..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20160915.md
+++ /dev/null
@@ -1,37 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

General Changes

- CockroachDB is now built with Go 1.7.1. [#9288](https://github.com/cockroachdb/cockroach/pull/9288)
- CockroachDB no longer requires Git 2.5; the minimum supported version is now 1.8. [#9325](https://github.com/cockroachdb/cockroach/pull/9325)

Bug Fixes

- Fixed an inconsistency that could occur when two conflicting transactions were assigned the same timestamp. [#9100](https://github.com/cockroachdb/cockroach/pull/9100)
- Nodes with multiple stores now export metrics in a format that works with Prometheus. [#9322](https://github.com/cockroachdb/cockroach/pull/9322)

Performance Improvements

- Improved concurrency of Raft processing so snapshots and replica garbage collection do not block other ranges. [#9176](https://github.com/cockroachdb/cockroach/pull/9176), [#9299](https://github.com/cockroachdb/cockroach/pull/9299)
- The rebalancing system is now more tolerant of small imbalances, making the range distribution more likely to reach a steady state. [#9230](https://github.com/cockroachdb/cockroach/pull/9230)
- Reduced memory used by debugging traces. [#9258](https://github.com/cockroachdb/cockroach/pull/9258)
- Ranges now become responsive sooner after startup. [#9276](https://github.com/cockroachdb/cockroach/pull/9276)
- Removed an internal timeout that caused unnecessary retry loops. [#9234](https://github.com/cockroachdb/cockroach/pull/9234)
- The system now tries harder to ensure that the range lease and Raft leadership are co-located. [#8834](https://github.com/cockroachdb/cockroach/pull/8834)
- Reduced memory used by Raft. [#9193](https://github.com/cockroachdb/cockroach/pull/9193)
- The consistency checker now uses less memory when an inconsistency is found. [#9159](https://github.com/cockroachdb/cockroach/pull/9159)
- The internal replica queues can now time out and recover from a replica that gets stuck. [#9312](https://github.com/cockroachdb/cockroach/pull/9312)
- Removed a redundant verification process that periodically scanned over all data. [#9333](https://github.com/cockroachdb/cockroach/pull/9333)

Doc Updates

- Added docs for [deploying CockroachDB on AWS](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-aws). [#640](https://github.com/cockroachdb/docs/pull/640)
- Added a "back to top" feature to improve the usability of longer pages. [#638](https://github.com/cockroachdb/docs/pull/638)
- Updated [Start a Local Cluster](https://www.cockroachlabs.com/docs/v1.0/start-a-local-cluster) to suggest manually setting each node's cache size to avoid memory errors during heavier testing against a local cluster. [#652](https://github.com/cockroachdb/docs/pull/652)
- Updated docs on [transaction retries](https://www.cockroachlabs.com/docs/v1.0/transactions#transaction-retries) to provide the correct error code. [#647](https://github.com/cockroachdb/docs/pull/647)

Contributors

This release includes 66 merged PRs by 17 authors.

diff --git a/src/current/_includes/releases/v1.0/beta-20160929.md b/src/current/_includes/releases/v1.0/beta-20160929.md
deleted file mode 100644
index 13d09089755..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20160929.md
+++ /dev/null
@@ -1,52 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

New Features

- The [`--advertise-host`](https://www.cockroachlabs.com/docs/v1.0/start-a-node#flags) flag can now be used to override the address to advertise to other CockroachDB nodes. [#9503](https://github.com/cockroachdb/cockroach/pull/9503)
- The [`--http-host`](https://www.cockroachlabs.com/docs/v1.0/start-a-node#flags) flag sets the address to bind to for HTTP requests. Together with `--http-port`, it will replace `--http-addr` in a future release. [#9573](https://github.com/cockroachdb/cockroach/pull/9573)
- More metrics are now exported about the internal garbage collection of deleted values. [#9571](https://github.com/cockroachdb/cockroach/pull/9571)
- More metrics are now exported about file descriptor usage. [#9582](https://github.com/cockroachdb/cockroach/pull/9582)
- The system now uses OpenTracing's new key-value style logging APIs. [#9578](https://github.com/cockroachdb/cockroach/pull/9578)

Bug Fixes

- Fixed an inconsistency that could occur when transactions race with garbage collection. [#9377](https://github.com/cockroachdb/cockroach/pull/9377)
- Retried `BeginTransaction` operations no longer leak internal errors to the client. [#9305](https://github.com/cockroachdb/cockroach/pull/9305)
- Brief service interruptions are now avoided after shard splits. [#9550](https://github.com/cockroachdb/cockroach/pull/9550)

UI Changes

- The UI no longer crashes when a tab is reopened after being in the background for a long time. [#9042](https://github.com/cockroachdb/cockroach/pull/9042)

Performance Improvements

- Ranges which are not receiving traffic now stop sending raft heartbeats. [#9383](https://github.com/cockroachdb/cockroach/pull/9383)
- Raft snapshots are now sent as a stream instead of one large message. [#9292](https://github.com/cockroachdb/cockroach/pull/9292)
- The rebalancer is now better about noticing under-full stores. [#9415](https://github.com/cockroachdb/cockroach/pull/9415)
- Raft messages are now sent in batches. [#9485](https://github.com/cockroachdb/cockroach/pull/9485)
- Nodes are now quicker to fail over to other replicas when one is unresponsive. [#9239](https://github.com/cockroachdb/cockroach/pull/9239)
- Dropping a table or index is now faster. [#9419](https://github.com/cockroachdb/cockroach/pull/9419)
- Re-enabled consensus-level optimizations for network I/O. [#9606](https://github.com/cockroachdb/cockroach/pull/9606)

Doc Updates

- Improved the SQL docs for granting privileges on databases and tables: [`GRANT`](https://www.cockroachlabs.com/docs/v1.0/grant). [#687](https://github.com/cockroachdb/docs/pull/687) (A combined example follows this list.)
- Completed the SQL docs for:
  - Getting user privileges on databases and tables: [`SHOW GRANTS`](https://www.cockroachlabs.com/docs/v1.0/show-grants). [#687](https://github.com/cockroachdb/docs/pull/687)
  - Setting and getting the default database for a session: [`SET DATABASE`](https://www.cockroachlabs.com/docs/v1.0/set-vars) and [`SHOW DATABASE`](https://www.cockroachlabs.com/docs/v1.0/show-vars). [#671](https://github.com/cockroachdb/docs/pull/671), [#683](https://github.com/cockroachdb/docs/pull/683)
  - Setting and getting the default time zone for a session: [`SET TIME ZONE`](https://www.cockroachlabs.com/docs/v1.0/set-vars) and [`SHOW TIME ZONE`](https://www.cockroachlabs.com/docs/v1.0/show-vars). [#680](https://github.com/cockroachdb/docs/pull/680)
  - Changing the name of a table column or index: [`RENAME COLUMN`](https://www.cockroachlabs.com/docs/v1.0/rename-column) and [`RENAME INDEX`](https://www.cockroachlabs.com/docs/v1.0/rename-index). [#678](https://github.com/cockroachdb/docs/pull/678), [#679](https://github.com/cockroachdb/docs/pull/679)
- Updated high-level overviews of primary [CockroachDB features](https://www.cockroachlabs.com/docs/v1.0/strong-consistency) and added related links. [#699](https://github.com/cockroachdb/docs/pull/699)
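To tie those statements together, here is a minimal combined sketch; the database, table, user, and index names are hypothetical:

```sql
-- Grant privileges on a hypothetical database and inspect them.
GRANT SELECT ON DATABASE bank TO maxroach;
SHOW GRANTS ON DATABASE bank;

-- Set and check the default database and time zone for the session.
SET DATABASE = bank;
SHOW DATABASE;
SET TIME ZONE 'UTC';
SHOW TIME ZONE;

-- Rename a column and an index on a hypothetical table.
ALTER TABLE accounts RENAME COLUMN amount TO balance;
ALTER INDEX accounts@accounts_amount_idx RENAME TO accounts_balance_idx;
```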

Contributors

This release includes 78 merged PRs by 19 authors. We would like to thank the following contributors from the CockroachDB community, especially first-time contributor Haines Chan:

- Haines Chan
- Jingguo Yao
diff --git a/src/current/_includes/releases/v1.0/beta-20161006.md b/src/current/_includes/releases/v1.0/beta-20161006.md
deleted file mode 100644
index 481c3ee4040..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20161006.md
+++ /dev/null
@@ -1,20 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

There aren't many user-visible changes in this week's release, an artifact of our recent stability efforts, which funneled user-visible changes to a secondary development branch. Our concerted stability effort is nearing its end, and we are preparing to include these features in next week's release.

Internal Changes

- Nodes now periodically compare their configured maximum clock offset and exit fatally if they find another node operating with a different configuration. [#9612](https://github.com/cockroachdb/cockroach/pull/9612)
- Internal low-level instrumentation has been added for debugging performance bottlenecks. [#9638](https://github.com/cockroachdb/cockroach/pull/9638)
- General improvements have been made to the tracing infrastructure. [#9641](https://github.com/cockroachdb/cockroach/pull/9641)

Doc Updates

- Updated various aspects of the CockroachDB [design document](https://github.com/cockroachdb/cockroach/blob/master/docs/design.md):
  - [Overview](https://github.com/cockroachdb/cockroach/blob/master/docs/design.md#overview) [#9648](https://github.com/cockroachdb/cockroach/pull/9648), [#9653](https://github.com/cockroachdb/cockroach/pull/9653)
  - [Lock-Free Distributed Transactions](https://github.com/cockroachdb/cockroach/blob/master/docs/design.md#lock-free-distributed-transactions) and [Logical Map Content](https://github.com/cockroachdb/cockroach/blob/master/docs/design.md#logical-map-content) [#9646](https://github.com/cockroachdb/cockroach/pull/9646)
  - [Strict Serializability (Linearizability)](https://github.com/cockroachdb/cockroach/blob/master/docs/design.md#strict-serializability-linearizability) [#9644](https://github.com/cockroachdb/cockroach/pull/9644)
  - [Node and Cluster Metrics](https://github.com/cockroachdb/cockroach/blob/master/docs/design.md#node-and-cluster-metrics) [#9647](https://github.com/cockroachdb/cockroach/pull/9647)
  - [SQL](https://github.com/cockroachdb/cockroach/blob/master/docs/design.md#sql) [#9651](https://github.com/cockroachdb/cockroach/pull/9651)

diff --git a/src/current/_includes/releases/v1.0/beta-20161013.md b/src/current/_includes/releases/v1.0/beta-20161013.md
deleted file mode 100644
index 54a1fb9509e..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20161013.md
+++ /dev/null
@@ -1,113 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

This week's release includes many user-visible changes and features that have been in development since our [stability-focused "code yellow"](https://www.cockroachlabs.com/blog/cant-run-100-node-cockroachdb-cluster/) started back in August. When we entered "code yellow", all work unrelated to stability was done in a secondary development environment. Since then, we've made great progress, and so have moved many of these new features back into our main development environment.

Backwards-Incompatible Changes

- [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp) values are now stored with microsecond precision instead of nanoseconds. All nanosecond-related functions have been removed. An existing table `t` with nanosecond timestamps in column `s` can round them to microseconds with `UPDATE t SET s = s + '0s'` (see the example below). However, note that this could potentially cause uniqueness problems if the timestamp is a primary key. [#8864](https://github.com/cockroachdb/cockroach/pull/8864)
- [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp) values (without `TIME ZONE`) are now parsed in UTC, not in the session time zone. [#9444](https://github.com/cockroachdb/cockroach/pull/9444)
- The deprecated `--http-addr` flag has been removed. When [starting a node](https://www.cockroachlabs.com/docs/v1.0/start-a-node), use the `--http-host` flag instead to specify the address to listen on for Admin UI HTTP requests. [#9725](https://github.com/cockroachdb/cockroach/pull/9725)
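For illustration, a minimal sketch of the rounding step mentioned above, assuming a hypothetical table `t` with a `TIMESTAMP` column `s`; the `UPDATE` statement itself comes directly from the release note:

```sql
-- Hypothetical table with a TIMESTAMP column, as in the note above.
CREATE TABLE t (
    id INT PRIMARY KEY,
    s TIMESTAMP
);

-- Adding a zero-length interval rewrites each value, rounding stored
-- nanosecond timestamps down to microsecond precision.
UPDATE t SET s = s + '0s';
```

As the note warns, rounding can collapse previously distinct values, so check for duplicates first if the timestamp participates in a primary key or unique index.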

SQL Language Changes

- Tables can now be [interleaved](https://www.cockroachlabs.com/docs/v1.0/interleave-in-parent) into other tables. Interleaving tables improves query performance by optimizing the key-value structure of closely related tables, attempting to keep data on the same key-value range if it’s likely to be read and written together. [#7985](https://github.com/cockroachdb/cockroach/pull/7985) (See the sketch after this list.)
- The [`CREATE TABLE AS`](https://www.cockroachlabs.com/docs/v1.0/create-table-as) statement can now be used to create a table based on the results of a `SELECT` statement. [#8802](https://github.com/cockroachdb/cockroach/pull/8802), [#9278](https://github.com/cockroachdb/cockroach/pull/9278)
- The [`ALTER TABLE ... SPLIT AT`](https://www.cockroachlabs.com/docs/v1.0/alter-table) command can now be used to force a range split at a specified key. [#8938](https://github.com/cockroachdb/cockroach/pull/8938)
- Added new `information_schema` metatables as well as initial support for the `pg_catalog` database. This work is part of our ongoing effort to provide standard database introspection required by popular ORMs. [#8433](https://github.com/cockroachdb/cockroach/pull/8433), [#8498](https://github.com/cockroachdb/cockroach/pull/8498), [#9565](https://github.com/cockroachdb/cockroach/pull/9565), [#9114](https://github.com/cockroachdb/cockroach/pull/9114), [#9104](https://github.com/cockroachdb/cockroach/pull/9104)
- [`CHECK`](https://www.cockroachlabs.com/docs/v1.0/check) constraints can now be added with the [`ALTER TABLE ADD CHECK`](https://www.cockroachlabs.com/docs/v1.0/alter-table) and `ALTER TABLE VALIDATE` statements. [#9127](https://github.com/cockroachdb/cockroach/pull/9127), [#9152](https://github.com/cockroachdb/cockroach/pull/9152)
- The [`ALTER TABLE DROP CONSTRAINT`](https://www.cockroachlabs.com/docs/v1.0/alter-table) statement can now drop [`CHECK`](https://www.cockroachlabs.com/docs/v1.0/check) and [`FOREIGN KEY`](https://www.cockroachlabs.com/docs/v1.0/foreign-key) constraints. [#8747](https://github.com/cockroachdb/cockroach/pull/8747)
- The [`SHOW CONSTRAINTS`](https://www.cockroachlabs.com/docs/v1.0/show-constraints) statement now requires the user to have privileges for the requested table. [#8658](https://github.com/cockroachdb/cockroach/pull/8658)
- The [`EXPLAIN`](https://www.cockroachlabs.com/docs/v1.0/explain) statement now works correctly for `VALUES` statements containing subqueries. [#8970](https://github.com/cockroachdb/cockroach/pull/8970)
- The [`TRUNCATE`](https://www.cockroachlabs.com/docs/v1.0/truncate) statement now implements the `CASCADE` modifier. [#9240](https://github.com/cockroachdb/cockroach/pull/9240)
- [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp) values that include a time zone can now omit the minutes field of the time zone offset, for compatibility with PostgreSQL. [#8666](https://github.com/cockroachdb/cockroach/pull/8666)
- [`INTERVAL`](https://www.cockroachlabs.com/docs/v1.0/interval) values can now be input in a colon-delimited format (H:M or H:M:S), for compatibility with PostgreSQL. [#8603](https://github.com/cockroachdb/cockroach/pull/8603)
- The `INT8` type is now supported as an alias for [`INT`](https://www.cockroachlabs.com/docs/v1.0/int). [#8858](https://github.com/cockroachdb/cockroach/pull/8858)
- [`INTERVAL`](https://www.cockroachlabs.com/docs/v1.0/interval), [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp), [`TIMESTAMPTZ`](https://www.cockroachlabs.com/docs/v1.0/timestamp), and [`DATE`](https://www.cockroachlabs.com/docs/v1.0/date) values can now be cast to and from more types. [#9731](https://github.com/cockroachdb/cockroach/pull/9731)
- The window functions `row_number()`, `rank()`, `dense_rank()`, `percent_rank()`, `cume_dist()`, `ntile()`, `lead()`, `lag()`, `first_value()`, `last_value()`, and `nth_value()` are now supported. [#8928](https://github.com/cockroachdb/cockroach/pull/8928), [#9321](https://github.com/cockroachdb/cockroach/pull/9321), [#9335](https://github.com/cockroachdb/cockroach/pull/9335)
- The `exp()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) returns an error if its argument is greater than 1024 instead of performing excessively expensive computation. [#8822](https://github.com/cockroachdb/cockroach/pull/8822)
- The `round()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) now breaks ties by rounding to the nearest even value (also known as bankers' rounding). It is now faster, and returns an error when given an excessively large number of digits. [#8822](https://github.com/cockroachdb/cockroach/pull/8822)
- The `concat_agg` [aggregate function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions) is now supported. [#9690](https://github.com/cockroachdb/cockroach/pull/9690)
- New [date/time functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators), `extract_duration()`, `experimental_strptime()`, and `experimental_strftime()`, are now supported. [#9734](https://github.com/cockroachdb/cockroach/pull/9734), [#9762](https://github.com/cockroachdb/cockroach/pull/9762)
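For illustration, a minimal sketch of a few of these features, again using hypothetical `customers` and `orders` tables (the names and columns are assumptions):

```sql
-- Interleave a child table into its parent to co-locate closely related rows.
CREATE TABLE customers (id INT PRIMARY KEY, name STRING);
CREATE TABLE orders (
    customer_id INT,
    id INT,
    total DECIMAL,
    PRIMARY KEY (customer_id, id)
) INTERLEAVE IN PARENT customers (customer_id);

-- Create a table from the results of a SELECT.
CREATE TABLE order_totals AS
    SELECT customer_id, sum(total) AS total FROM orders GROUP BY customer_id;

-- One of the newly supported window functions.
SELECT customer_id, id, total,
       rank() OVER (PARTITION BY customer_id ORDER BY total DESC) AS rnk
FROM orders;
```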

Protocol Changes

- The `COPY` protocol is now supported on the server side with the `COPY FROM` statement. The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) command-line tool does not yet recognize this command, but it can be used with other client interfaces. [#8756](https://github.com/cockroachdb/cockroach/pull/8756)
- [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) values are now encoded correctly for drivers that use the binary format. [#8319](https://github.com/cockroachdb/cockroach/pull/8319)
- [`TIMESTAMP`](https://www.cockroachlabs.com/docs/v1.0/timestamp), [`TIMESTAMPTZ`](https://www.cockroachlabs.com/docs/v1.0/timestamp), and [`DATE`](https://www.cockroachlabs.com/docs/v1.0/date) values can now use the binary protocol format for clients that support it. [#8590](https://github.com/cockroachdb/cockroach/pull/8590), [#8762](https://github.com/cockroachdb/cockroach/pull/8762)
- [`NULL` values](https://www.cockroachlabs.com/docs/v1.0/null-handling) are now sent with the correct type OID, which is required by some PostgreSQL drivers. [#9331](https://github.com/cockroachdb/cockroach/pull/9331)
- The [`CREATE TABLE`](https://www.cockroachlabs.com/docs/v1.0/create-table) and [`CREATE DATABASE`](https://www.cockroachlabs.com/docs/v1.0/create-database) statements now return the standard PostgreSQL error code when the table or database already exists. [#9235](https://github.com/cockroachdb/cockroach/pull/9235)

Command-line Interface Changes

- The [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node#standard-output) command now prints additional fields to `stdout`: `clusterID`, `nodeID`, and `status`, which indicates whether the node started a new cluster, joined an existing cluster for the first time, or rejoined an existing cluster. [#9066](https://github.com/cockroachdb/cockroach/pull/9066)
- The [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) command now terminates with a non-zero exit status if it was interrupted with **Ctrl+C** or a signal. [#9051](https://github.com/cockroachdb/cockroach/pull/9051)
- The [`cockroach zone set`](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) command now supports the `--disable-replication` flag, which sets the desired replication factor to 1. [#9253](https://github.com/cockroachdb/cockroach/pull/9253)
- The [`cockroach gen example-data`](https://www.cockroachlabs.com/docs/v1.0/generate-cockroachdb-resources) command can now be used to generate SQL for example databases. [#9231](https://github.com/cockroachdb/cockroach/pull/9231), [#9475](https://github.com/cockroachdb/cockroach/pull/9475)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now presents a new prompt when **Ctrl+C** is pressed after the user starts entering a statement. [#9704](https://github.com/cockroachdb/cockroach/pull/9704)
- When using **Ctrl+R** in the [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell to recall a previous statement from the shell history, the search is now case-insensitive. [#9704](https://github.com/cockroachdb/cockroach/pull/9704)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now reports unterminated final statements as errors instead of ignoring them. [#8838](https://github.com/cockroachdb/cockroach/pull/8838)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now reports when the connection to the server is lost and a new connection is opened. [#9613](https://github.com/cockroachdb/cockroach/pull/9613)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now ignores lines that contain only whitespace (e.g., comments). [#9243](https://github.com/cockroachdb/cockroach/pull/9243)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now prints expressions in [`EXPLAIN`](https://www.cockroachlabs.com/docs/v1.0/explain) and the shell history such as `E::T` or `E:::T` in the same format entered instead of using the longer `CAST` or `TYPE_ANNOTATE` syntax. [#9739](https://github.com/cockroachdb/cockroach/pull/9739)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now stops processing statements at the first error encountered when reading from a file (non-interactively). This behavior is customizable with the new [`unset errexit`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client#sql-shell-commands) shell command. [#9610](https://github.com/cockroachdb/cockroach/pull/9610)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now checks if a statement or query is syntactically valid on the client side before it is sent to the server. This ensures that a typo or mistake during user entry does not inconveniently abort an ongoing transaction previously started interactively. This behavior is customizable with the new [`unset check_syntax`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client#sql-shell-commands) shell command. [#9610](https://github.com/cockroachdb/cockroach/pull/9610)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now works better with multi-line statements. In particular, users will find it easier to recall and edit previously entered multi-line statements. This behavior is customizable with a new command [`unset normalize_history`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client#sql-shell-commands). [#9610](https://github.com/cockroachdb/cockroach/pull/9610)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell now works correctly under `kubectl` and other environments that hide the size of the terminal. [#9097](https://github.com/cockroachdb/cockroach/pull/9097)

UI Changes

- The time scale selector in the UI now works more reliably. [#8573](https://github.com/cockroachdb/cockroach/pull/8573)
- Additional database and table-level details are now available. [#9621](https://github.com/cockroachdb/cockroach/pull/9621)
- Displaying graphs for longer timescales is now much faster. [#8805](https://github.com/cockroachdb/cockroach/pull/8805)
- The default graph time scale is now chosen based on the age of the cluster. [#9340](https://github.com/cockroachdb/cockroach/pull/9340)
- Node logs are now accessible in the UI. [#8572](https://github.com/cockroachdb/cockroach/pull/8572)
- The UI can now be built with live reload support. [#8679](https://github.com/cockroachdb/cockroach/pull/8679)
- Exposed idiomatic bucketed histograms on the Prometheus endpoint. [#9810](https://github.com/cockroachdb/cockroach/pull/9810)
- Exposed mutex timing metrics. [#9769](https://github.com/cockroachdb/cockroach/pull/9769)

Bug Fixes

- Fixed some panics in handling invalid SQL statements. [#9049](https://github.com/cockroachdb/cockroach/pull/9049), [#9050](https://github.com/cockroachdb/cockroach/pull/9050)
- Fixed an issue that sometimes caused transactions to restart twice in a row. [#8596](https://github.com/cockroachdb/cockroach/pull/8596)
- [`STRING`](https://www.cockroachlabs.com/docs/v1.0/string) literals containing non-UTF-8 data are now rejected. [#9094](https://github.com/cockroachdb/cockroach/pull/9094)
- The [`UPSERT`](https://www.cockroachlabs.com/docs/v1.0/upsert) statement now works correctly during online schema changes. [#9481](https://github.com/cockroachdb/cockroach/pull/9481)
- Aggregate and window functions in `GROUP BY` ordinal expressions are now rejected without crashing. [#9629](https://github.com/cockroachdb/cockroach/pull/9629)
- The unsupported `ALTER TABLE ... ALTER COLUMN ... SET TYPE ...` statement now reports an error to the client instead of crashing the server. [#8747](https://github.com/cockroachdb/cockroach/pull/8747)
- Slightly improved support for 32-bit systems. Note that 32-bit architectures remain officially unsupported. [#9491](https://github.com/cockroachdb/cockroach/pull/9491)
- The admin UI now propagates timeouts to the server to avoid dangling requests. [#8546](https://github.com/cockroachdb/cockroach/pull/8546)
- Fixed issues with duplicate column qualifications in `CREATE TABLE` statements. [#9868](https://github.com/cockroachdb/cockroach/pull/9868)
- Empty statements are now handled correctly in prepared statements. [#9811](https://github.com/cockroachdb/cockroach/pull/9811)
- Fixed a Raft assertion caused by insufficient locking. [#9814](https://github.com/cockroachdb/cockroach/pull/9814)

Performance Improvements

- Tuned RocksDB settings to reduce write stalls. [#9663](https://github.com/cockroachdb/cockroach/issues/9663)
- Minor performance optimization to avoid reading an internal metadata key that will be immediately overwritten. [#9263](https://github.com/cockroachdb/cockroach/pull/9263)
- Improved concurrency to prevent certain slow operations from dramatically impacting overall performance. [#9622](https://github.com/cockroachdb/cockroach/pull/9622)
- [`DROP INDEX`](https://www.cockroachlabs.com/docs/v1.0/drop-index) and [`DROP TABLE`](https://www.cockroachlabs.com/docs/v1.0/drop-table) are now performed in chunks instead of one big transaction. [#8870](https://github.com/cockroachdb/cockroach/pull/8870), [#8885](https://github.com/cockroachdb/cockroach/pull/8885)
- The SQL query processor now tracks its memory usage. Soon it will return errors for queries that use too much memory. [#8691](https://github.com/cockroachdb/cockroach/pull/8691)
- [Aggregate functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions) like `sum()` are now faster. [#8680](https://github.com/cockroachdb/cockroach/pull/8680)
- For [production deployments](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings), at least 10000 file descriptors are now recommended per store, with a minimum of 2000 required. [#9679](https://github.com/cockroachdb/cockroach/pull/9679)
- Improved scheduling of Raft work. [#9831](https://github.com/cockroachdb/cockroach/pull/9831)
- Replaced per-request checks with a per-batch check. [#9848](https://github.com/cockroachdb/cockroach/pull/9848)
- Micro-optimizations for writing time series data. [#9862](https://github.com/cockroachdb/cockroach/pull/9862)
- Narrowed the use of a lock during Raft log truncation. [#9840](https://github.com/cockroachdb/cockroach/pull/9840)
- Refactored a common locking pattern for better performance. [#9771](https://github.com/cockroachdb/cockroach/pull/9771)

Doc Updates

- Added docs on [orchestrating CockroachDB with Docker Swarm](https://www.cockroachlabs.com/docs/v1.0/orchestrate-cockroachdb-with-docker-swarm). [#676](https://github.com/cockroachdb/docs/pull/676)
- Added docs on the [`cockroach gen`](https://www.cockroachlabs.com/docs/v1.0/generate-cockroachdb-resources) command, which can be used to generate `man` pages, a `bash` autocompletion script, and example SQL data suitable to populate test databases. [#755](https://github.com/cockroachdb/docs/pull/755)
- Updated the `cockroach sql` command docs to include more details about supported [SQL shell shortcuts](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client#sql-shell-shortcuts).

Contributors

This release includes 338 merged PRs by 28 authors. We would like to thank the following contributors from the CockroachDB community, especially first-time contributors Francis Bergin and Yan Long:

- Francis Bergin
- Daniel Theophanes
- Yan Long
- songhao

diff --git a/src/current/_includes/releases/v1.0/beta-20161027.md b/src/current/_includes/releases/v1.0/beta-20161027.md
deleted file mode 100644
index 0b9fe3912ff..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20161027.md
+++ /dev/null
@@ -1,59 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

Backwards-Incompatible Changes

- Functions that return the current time - including `now()` - now return values of type [`TIMESTAMP WITH TIME ZONE`](https://www.cockroachlabs.com/docs/v1.0/timestamp) instead of `TIMESTAMP`, unless called from a context that expects the latter type (see the short example below). [#9618](https://github.com/cockroachdb/cockroach/pull/9618)
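A small sketch of the new behavior (the exact output depends on your client and session settings):

```sql
-- now() returns TIMESTAMP WITH TIME ZONE by default...
SELECT now();

-- ...while an explicit cast still yields a plain TIMESTAMP.
SELECT now()::TIMESTAMP;
```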

SQL Language Changes

- [Views](https://www.cockroachlabs.com/docs/v1.0/views) are now supported. Views are stored queries, represented as virtual tables, that produce a result set when requested. They can be used to hide query complexity, limit access to underlying data, or simplify the process of supporting legacy code. (See the sketch after this list.)
- Tables in the `pg_catalog` database can now be accessed with unqualified names if no table of the same name exists in the current database. [#9927](https://github.com/cockroachdb/cockroach/pull/9927)
- `AS OF SYSTEM TIME` queries now support the decimal format returned by `cluster_logical_timestamp()` for maximum precision. [#9934](https://github.com/cockroachdb/cockroach/pull/9934)
- The `CASCADE` option of [`DROP TABLE`](https://www.cockroachlabs.com/docs/v1.0/drop-table) and [`ALTER TABLE DROP COLUMN`](https://www.cockroachlabs.com/docs/v1.0/alter-table) now drops views that depend on the table or column; it is now an error to attempt to drop a table or column with dependent views without either using this option or dropping the views first. [#9724](https://github.com/cockroachdb/cockroach/pull/9724), [#10124](https://github.com/cockroachdb/cockroach/pull/10124)
- `SET DEFAULT_TRANSACTION_ISOLATION` is now supported, improving compatibility with `psycopg2`. [#10087](https://github.com/cockroachdb/cockroach/pull/10087)
- The `pg_catalog.pg_indexes` and `pg_catalog.pg_constraint` tables are now supported. [#9869](https://github.com/cockroachdb/cockroach/pull/9869), [#9991](https://github.com/cockroachdb/cockroach/pull/9991)
- [`EXPLAIN`](https://www.cockroachlabs.com/docs/v1.0/explain) can now be used with `SHOW`, `HELP`, and `ALTER TABLE ... SPLIT`. [#10013](https://github.com/cockroachdb/cockroach/pull/10013)
- The `current_schemas()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) is now supported, improving compatibility with ActiveRecord. [#9604](https://github.com/cockroachdb/cockroach/pull/9604)
- The [`SHOW ALL`](https://www.cockroachlabs.com/docs/v1.0/show-vars) statement is now supported. [#10195](https://github.com/cockroachdb/cockroach/pull/10195)
- The [`DROP INDEX`](https://www.cockroachlabs.com/docs/v1.0/drop-index) and [`ALTER INDEX`](https://www.cockroachlabs.com/docs/v1.0/rename-index) statements can now use unqualified index names. [#10091](https://github.com/cockroachdb/cockroach/pull/10091)
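A brief sketch of the new view support and historical reads; the table and view names are hypothetical:

```sql
-- A stored query over a hypothetical orders table.
CREATE VIEW order_summaries AS
    SELECT id, customer_id, total FROM orders;

SELECT * FROM order_summaries;

-- Historical reads: a timestamp string works, and the decimal form returned
-- by cluster_logical_timestamp() is now accepted as well for full precision.
SELECT * FROM orders AS OF SYSTEM TIME '2016-10-27 12:00:00';
```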

Command-Line Interface Changes

- The `--host` flag in [client commands](https://www.cockroachlabs.com/docs/v1.0/cockroach-commands) such as [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) now defaults to `localhost` in both secure and insecure modes. [#10134](https://github.com/cockroachdb/cockroach/pull/10134)
- Improved error messages when a server is down or certificates are not configured correctly. [#9917](https://github.com/cockroachdb/cockroach/pull/9917)
- The `--key` flag can now be passed via the [environment variable](https://www.cockroachlabs.com/docs/v1.0/cockroach-commands#environment-variables) `COCKROACH_KEY`, matching other similar flags. [#10092](https://github.com/cockroachdb/cockroach/pull/10092)

Performance Improvements

- RocksDB bloom filters now use 10x less memory, with a negligible reduction in speed. [#10085](https://github.com/cockroachdb/cockroach/pull/10085)
- Old time-series data is now garbage collected. [#9959](https://github.com/cockroachdb/cockroach/pull/9959)
- Reduced overhead of the internal time-series metric system. [#9889](https://github.com/cockroachdb/cockroach/pull/9889)
- Reduced impact of schema changes on regular queries. [#9798](https://github.com/cockroachdb/cockroach/pull/9798)
- In the event of a node failure, schema changes now save their progress and can resume from their last checkpoint instead of restarting from the beginning. [#10036](https://github.com/cockroachdb/cockroach/pull/10036)
- Ranges are now split more eagerly when tables are created or data is growing rapidly. [#10232](https://github.com/cockroachdb/cockroach/pull/10232)

Bug Fixes

- Certain network- and timeout-related errors will now return the error "transaction commit result is ambiguous" when it cannot be determined whether a transaction committed or not. This fixes bugs in which statements outside of transactions could be applied twice, and other transactions may incorrectly report unique constraint violations. [#10207](https://github.com/cockroachdb/cockroach/pull/10207)
- [`SET TIME ZONE 0`](https://www.cockroachlabs.com/docs/v1.0/set-vars#set-time-zone) now sets the session time zone to UTC (other numbers already worked). [#9992](https://github.com/cockroachdb/cockroach/pull/9992)
- `SHOW` statements for session variables now work correctly when prepared and executed separately. [#10013](https://github.com/cockroachdb/cockroach/pull/10013)
- Columns that are a part of the primary key can now be renamed. [#10018](https://github.com/cockroachdb/cockroach/pull/10018)
- Fixed a panic during transaction rollback. [#9961](https://github.com/cockroachdb/cockroach/pull/9961)
- Float comparisons involving `NaN` are now correct in all cases. [#10112](https://github.com/cockroachdb/cockroach/pull/10112)
- Clock offset monitoring is now more sensitive. [#10185](https://github.com/cockroachdb/cockroach/pull/10185)
- The rebalancer is now better able to avoid placing a replica on a store that has previously had a corrupted replica of the same range. [#10141](https://github.com/cockroachdb/cockroach/pull/10141)
- More complex expressions involving window functions are now supported. [#10186](https://github.com/cockroachdb/cockroach/pull/10186)
- Fixed a deadlock that could occur when using the Prometheus metrics endpoint. [#10228](https://github.com/cockroachdb/cockroach/pull/10228)

Contributors

This release includes 182 merged PRs by 24 authors. We would like to thank the following contributors from the CockroachDB community, including first-time contributor Haines Chan:

- Haines Chan
- songhao
diff --git a/src/current/_includes/releases/v1.0/beta-20161103.md b/src/current/_includes/releases/v1.0/beta-20161103.md
deleted file mode 100644
index 8ceb2d2fea8..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20161103.md
+++ /dev/null
@@ -1,62 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

Backwards-Incompatible Changes

- Users (other than `root`) must now be created with the `CREATE USER` statement or `cockroach user set` command before they can be used for TLS certificate- or password-based authentication. [#9794](https://github.com/cockroachdb/cockroach/pull/9794)
- The `ambiguous result` error message introduced in `beta-20161027` is now simply `result is ambiguous` since it may be returned outside of transactions. [#10279](https://github.com/cockroachdb/cockroach/pull/10279)

SQL Language Changes

- Password-based authentication is now supported. Clients can authenticate with either a TLS certificate or a password. Documentation coming soon. [#9794](https://github.com/cockroachdb/cockroach/pull/9794)
- The `SHOW USERS` statement is now supported. [#10088](https://github.com/cockroachdb/cockroach/pull/10088) (See the short example after this list.)
- The `pg_catalog.pg_type`, `pg_catalog.pg_database`, `pg_catalog.pg_views`, `pg_catalog.pg_proc`, and `pg_catalog.pg_am` tables are now supported. [#10209](https://github.com/cockroachdb/cockroach/pull/10209), [#10284](https://github.com/cockroachdb/cockroach/pull/10284), [#10276](https://github.com/cockroachdb/cockroach/pull/10276), [#10217](https://github.com/cockroachdb/cockroach/pull/10217), [#10363](https://github.com/cockroachdb/cockroach/pull/10363)
- The `information_schema.statistics` and `information_schema.views` tables are now supported. [#10220](https://github.com/cockroachdb/cockroach/pull/10220), [#10288](https://github.com/cockroachdb/cockroach/pull/10288)
- [`SHOW TABLES`](https://www.cockroachlabs.com/docs/v1.0/show-tables) now adds a `(dropped)` suffix to tables that are being dropped. [#10063](https://github.com/cockroachdb/cockroach/pull/10063)
- [`SHOW CREATE VIEW`](https://www.cockroachlabs.com/docs/v1.0/show-create-view) now only requires permissions on the view, not the underlying tables. [#10270](https://github.com/cockroachdb/cockroach/pull/10270)
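A minimal sketch of the user-management statements above (the username is hypothetical); note the backwards-incompatible change that users must be created before they can authenticate:

```sql
-- Create a user; it can then authenticate with a TLS certificate or a password.
CREATE USER maxroach;

-- List the users known to the cluster.
SHOW USERS;
```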

Command-Line Interface Changes

- When entering a transaction in the [built-in sql shell](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client), the statements are not sent to the server until there is either a `COMMIT`, `ROLLBACK`, or two blank lines. This minimizes the lifetime of the transaction and therefore the risk of conflicts. [#10187](https://github.com/cockroachdb/cockroach/pull/10187)

Performance Improvements

- Queries that span multiple ranges are now executed across those ranges in parallel. [#9197](https://github.com/cockroachdb/cockroach/pull/9197)
- Time series data (used in the admin UI) is now loaded in parallel. [#10250](https://github.com/cockroachdb/cockroach/pull/10250)
- Rebalancing now reaches an equilibrium faster. [#10330](https://github.com/cockroachdb/cockroach/pull/10330)

Bug Fixes

- Memory usage of SQL queries is now monitored, and queries that use too much will fail rather than exhaust the server's memory. [#9259](https://github.com/cockroachdb/cockroach/pull/9259)
- Fixed a problem with snapshot error handling that could cause the cluster to be unable to make progress after node failure.
- Computing the `log()` of a very large number no longer causes the server to hang. [#10221](https://github.com/cockroachdb/cockroach/pull/10221)
- Decimal numbers are now parsed correctly in `AS OF SYSTEM TIME` queries. [#10242](https://github.com/cockroachdb/cockroach/pull/10242)
- `concat_ws()` with no arguments no longer crashes the server. [#10309](https://github.com/cockroachdb/cockroach/pull/10309)
- Fixed a bug when a command was executed after its client had disconnected. [#10279](https://github.com/cockroachdb/cockroach/pull/10279)

Doc Updates

- Clarified and expanded the [`transactions`](https://www.cockroachlabs.com/docs/v1.0/transactions) overview documentation, and completed docs on transaction-specific statements (a sketch of the retry pattern follows this list): [#672](https://github.com/cockroachdb/docs/pull/672)
  - [`BEGIN`](https://www.cockroachlabs.com/docs/v1.0/begin-transaction)
  - [`COMMIT`](https://www.cockroachlabs.com/docs/v1.0/commit-transaction)
  - [`SET TRANSACTION`](https://www.cockroachlabs.com/docs/v1.0/set-transaction)
  - [`SAVEPOINT cockroach_restart`](https://www.cockroachlabs.com/docs/v1.0/savepoint)
  - [`RELEASE SAVEPOINT cockroach_restart`](https://www.cockroachlabs.com/docs/v1.0/release-savepoint)
  - [`ROLLBACK`](https://www.cockroachlabs.com/docs/v1.0/rollback-transaction)
  - [`SHOW`](https://www.cockroachlabs.com/docs/v1.0/show-vars)
- Added language-specific examples for [`INSERT` statements with `RETURNING`](https://www.cockroachlabs.com/docs/v1.0/insert#insert-and-return-values). [#813](https://github.com/cockroachdb/docs/pull/813)
- Updated the [SQL Feature Support](https://www.cockroachlabs.com/docs/v1.0/sql-feature-support) page to reflect support for [views](https://www.cockroachlabs.com/docs/v1.0/views) (SQL standard) and [interleaved tables](https://www.cockroachlabs.com/docs/v1.0/interleave-in-parent) (CockroachDB extensions).
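The transaction docs listed above describe CockroachDB's client-side retry protocol; here is a compact sketch of that pattern plus an `INSERT ... RETURNING`, using a hypothetical `accounts` table:

```sql
-- Client-side retry pattern from the transaction docs.
BEGIN;
SAVEPOINT cockroach_restart;
UPDATE accounts SET balance = balance - 50 WHERE id = 1;
UPDATE accounts SET balance = balance + 50 WHERE id = 2;
-- On a retryable error, issue ROLLBACK TO SAVEPOINT cockroach_restart
-- and retry the statements above.
RELEASE SAVEPOINT cockroach_restart;
COMMIT;

-- INSERT can return values from the rows it writes.
INSERT INTO accounts (id, balance) VALUES (3, 100) RETURNING id, balance;
```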

Contributors

This release includes 68 merged PRs by 24 authors. We would like to thank the following contributors from the CockroachDB community, including first-time contributors Johan Brandhorst and MaBo:

- Johan Brandhorst
- MaBo
- Yan Long
- songhao
diff --git a/src/current/_includes/releases/v1.0/beta-20161110.md b/src/current/_includes/releases/v1.0/beta-20161110.md
deleted file mode 100644
index e802d616089..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20161110.md
+++ /dev/null
@@ -1,7 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

{{site.data.alerts.callout_danger}}
Data corruption has been observed when upgrading to this release from prior versions, so it has been withdrawn.
{{site.data.alerts.end}}

diff --git a/src/current/_includes/releases/v1.0/beta-20161201.md b/src/current/_includes/releases/v1.0/beta-20161201.md
deleted file mode 100644
index 93fb83106cd..00000000000
--- a/src/current/_includes/releases/v1.0/beta-20161201.md
+++ /dev/null
@@ -1,140 +0,0 @@

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

Upgrade Notes

Due to changes in the on-disk format of internal range leases, this release cannot be run concurrently with any prior release. All servers running older versions must be stopped before starting any node with this version. [#10420](https://github.com/cockroachdb/cockroach/pull/10420)

We realize that "stop the world" upgrades are overly interruptive and are actively working on infrastructure improvements to drastically reduce the need for such upgrades in the future.

Build Changes

- CockroachDB now *vendors* its dependencies, so building from source no longer interferes with other projects in the `$GOPATH`. [#9900](https://github.com/cockroachdb/cockroach/pull/9900)

SQL Language Changes

- Adding a [`FOREIGN KEY`](https://www.cockroachlabs.com/docs/v1.0/foreign-key) constraint now automatically creates necessary indexes. [#9572](https://github.com/cockroachdb/cockroach/pull/9572)
- The `pg_catalog.pg_roles`, `pg_catalog.pg_description`, `pg_catalog.pg_settings`, and `pg_catalog.pg_index` tables are now supported. [#10377](https://github.com/cockroachdb/cockroach/pull/10377), [#10381](https://github.com/cockroachdb/cockroach/pull/10381), [#10293](https://github.com/cockroachdb/cockroach/pull/10293), [#10548](https://github.com/cockroachdb/cockroach/pull/10548), [#10592](https://github.com/cockroachdb/cockroach/pull/10592)
- The `pg_catalog.pg_depend` table is now partially supported (just enough to support `pgjdbc`). [#10696](https://github.com/cockroachdb/cockroach/pull/10696)
- `pg_catalog` tables now report databases as belonging to PostgreSQL-compatible namespaces. This is for compatibility only; CockroachDB does not have a notion of namespaces. [#11603](https://github.com/cockroachdb/cockroach/pull/11603)
- The `pg_catalog.pg_get_expr` and `pg_catalog.pg_generate_series` functions are now supported. [#10952](https://github.com/cockroachdb/cockroach/pull/10952), [#11214](https://github.com/cockroachdb/cockroach/pull/11214)
- The `conkey` and `confkey` columns of the `pg_catalog.pg_constraint` table now return real integer arrays. [#10584](https://github.com/cockroachdb/cockroach/pull/10584)
- New [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) `from_ip()` and `to_ip()` convert between binary and textual IP address formats. [#10349](https://github.com/cockroachdb/cockroach/pull/10349)
- Tuple types can now be returned by queries. [#10380](https://github.com/cockroachdb/cockroach/pull/10380)
- The SQL standard interval format `Y-M-D` is now supported. [#10499](https://github.com/cockroachdb/cockroach/pull/10499)
- The `array_length`, `array_upper`, and `array_lower` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) are now available. [#10565](https://github.com/cockroachdb/cockroach/pull/10565)
- The `to_uuid` and `from_uuid` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) are now available. [#10541](https://github.com/cockroachdb/cockroach/pull/10541)
- The `pow()` and `div()` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) now work when both arguments are integers. [#10538](https://github.com/cockroachdb/cockroach/pull/10538)
- The `ARRAY[]` constructor syntax is now supported. [#10585](https://github.com/cockroachdb/cockroach/pull/10585) (See the sketch after this list.)
- The `COLLATE` operator is now supported. Collation support is still incomplete and will be improved in upcoming releases. [#10605](https://github.com/cockroachdb/cockroach/pull/10605)
- The `current_schema` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) is now supported. [#10707](https://github.com/cockroachdb/cockroach/pull/10707)
- The `SET APPLICATION_NAME` statement is now supported. This is a write-only variable which is used by some frameworks but has no effect. [#10725](https://github.com/cockroachdb/cockroach/pull/10725)
- The [`CREATE DATABASE`](https://www.cockroachlabs.com/docs/v1.0/create-database) statement now accepts the options `TEMPLATE`, `LC_COLLATE`, and `LC_CTYPE` for compatibility with PostgreSQL, although the values available for these options are limited. [#10775](https://github.com/cockroachdb/cockroach/pull/10775)
- The [`SELECT`](https://www.cockroachlabs.com/docs/v1.0/select) statement now supports the `WITH ORDINALITY` modifier to generate row numbers. [#10558](https://github.com/cockroachdb/cockroach/pull/10558)
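For illustration, a small sketch of a few of the new expressions (a hypothetical `customers` table is assumed for the `WITH ORDINALITY` example):

```sql
-- The ARRAY[] constructor and one of the new array functions.
SELECT ARRAY[1, 2, 3];
SELECT array_length(ARRAY[1, 2, 3], 1);

-- WITH ORDINALITY numbers the rows produced by a FROM item.
SELECT * FROM customers WITH ORDINALITY;
```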

Command-Line Interface Changes

- Temporary directories are no longer created automatically for logging. Commands other than `cockroach start` log to `stderr` if no `--log-dir` flag is given, and `cockroach start` writes to the directory of the first store. Various combinations of `--log-dir`, `--logtostderr`, and `--alsologtostderr` now work in more sensible ways. [#10675](https://github.com/cockroachdb/cockroach/pull/10675), [#10926](https://github.com/cockroachdb/cockroach/pull/10926), [#10962](https://github.com/cockroachdb/cockroach/pull/10962)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) prompt now indicates the status of the current transaction (if any). [#10866](https://github.com/cockroachdb/cockroach/pull/10866)
- The `--password` flag for the [`cockroach user set`](https://www.cockroachlabs.com/docs/v1.0/create-and-manage-users) command no longer accepts the new password as a command-line argument; instead this flag is a boolean to determine whether a password should be created for the user. The password is always read from `stdin`. [#10680](https://github.com/cockroachdb/cockroach/pull/10680)
- The [`cockroach user set`](https://www.cockroachlabs.com/docs/v1.0/create-and-manage-users) command no longer prompts for passwords in insecure mode unless `--password` is given. [#10547](https://github.com/cockroachdb/cockroach/pull/10547)
- The `cockroach debug compact` command now always rewrites all data, allowing it to pick up configuration changes. [#10532](https://github.com/cockroachdb/cockroach/pull/10532)

Admin UI Changes

- The design of the navigation elements has been updated. [#10611](https://github.com/cockroachdb/cockroach/pull/10611)
- The design of the Database tab has been updated. [#10552](https://github.com/cockroachdb/cockroach/pull/10552)
- Syntax highlighting is now used when displaying `CREATE TABLE` statements. [#10579](https://github.com/cockroachdb/cockroach/pull/10579)
- The UI no longer attempts to display information about the virtual databases `information_schema` and `pg_catalog`. [#10920](https://github.com/cockroachdb/cockroach/pull/10920)
- Metrics for the fraction of available and fully-replicated ranges are now computed more accurately. [#11213](https://github.com/cockroachdb/cockroach/pull/11213)

Performance Improvements

- Increased the speed of some mathematical operations (Log, Pow) on [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) values. [#11669](https://github.com/cockroachdb/cockroach/pull/11669)
- Reduced rebalancer thrashing by dynamically adjusting how often store capacities are gossiped. [#11662](https://github.com/cockroachdb/cockroach/pull/11662)
- Adding new replicas is now prioritized over removing dead ones. [#10492](https://github.com/cockroachdb/cockroach/pull/10492), [#10683](https://github.com/cockroachdb/cockroach/pull/10683)
- Replicating ranges to a new node is now more reliably performed back-to-back. [#10440](https://github.com/cockroachdb/cockroach/pull/10440), [#10749](https://github.com/cockroachdb/cockroach/pull/10749)
- The rebalancing system is now less prone to thrashing. [#10761](https://github.com/cockroachdb/cockroach/pull/10761)
- Raft log truncation is now aware of pending snapshots. [#10482](https://github.com/cockroachdb/cockroach/pull/10482)
- Raft snapshots are now applied more efficiently. [#10931](https://github.com/cockroachdb/cockroach/pull/10931)
- Raft now sends log entries in smaller batches. [#10929](https://github.com/cockroachdb/cockroach/pull/10929)
- Replica garbage collection is now triggered more reliably by replication changes. [#10500](https://github.com/cockroachdb/cockroach/pull/10500)
- Old replicas that are blocking other operations are now prioritized for garbage collection. [#10426](https://github.com/cockroachdb/cockroach/pull/10426)
- Small clusters now run their replica scanners more frequently by default. [#10433](https://github.com/cockroachdb/cockroach/pull/10433)
- Reduced contention in the command queue for multi-range operations. [#10470](https://github.com/cockroachdb/cockroach/pull/10470)
- Operations that have already expired are no longer added to the command queue. [#10487](https://github.com/cockroachdb/cockroach/pull/10487)
- Reduced allocations for SQL row data. [#10534](https://github.com/cockroachdb/cockroach/pull/10534)
- Reduced memory allocations when encoding dates and times. [#10531](https://github.com/cockroachdb/cockroach/pull/10531)
- Parsing hexadecimal literals using the `x''` syntax is now more efficient. [#10660](https://github.com/cockroachdb/cockroach/pull/10660)
- Time series pruning is now more efficient. [#10682](https://github.com/cockroachdb/cockroach/pull/10682)
- Commands that have been canceled by clients no longer accumulate in the internal command queue. [#10772](https://github.com/cockroachdb/cockroach/pull/10772)
- When a command that has been retried on multiple replicas succeeds on any of them, any in-flight attempts on the other replicas are canceled. [#10970](https://github.com/cockroachdb/cockroach/pull/10970)
- The fast-path for local "RPCs" now uses goroutines to avoid blocking the caller. [#11196](https://github.com/cockroachdb/cockroach/pull/11196)

Bug Fixes

- Fixed a bug that caused over-aggressive Raft log truncation, which in turn led to an excessive number of Raft initiated snapshots. [#11720](https://github.com/cockroachdb/cockroach/pull/11720)
- Fixed a bug that caused data corruption when upgrading from `beta-20161103` to `beta-20161110`. [#10681](https://github.com/cockroachdb/cockroach/pull/10681) [#10724](https://github.com/cockroachdb/cockroach/pull/10724)
- A node that is stopped and restarted quickly can no longer produce inconsistent results. [#10420](https://github.com/cockroachdb/cockroach/pull/10420)
- Fixed bugs that prevented communication with a node that was previously down. [#10642](https://github.com/cockroachdb/cockroach/pull/10642) [#10652](https://github.com/cockroachdb/cockroach/pull/10652)
- It is now possible for a new node to start up using the same network address as a node that had previously existed. [#10544](https://github.com/cockroachdb/cockroach/pull/10544) [#10699](https://github.com/cockroachdb/cockroach/pull/10699)
- Replication snapshots now release their resources earlier, preventing deadlocks. [#10491](https://github.com/cockroachdb/cockroach/pull/10491)
- Fixed a bug with time series garbage collection when the time series data spans multiple ranges. [#10400](https://github.com/cockroachdb/cockroach/pull/10400)
- Fixed several bugs with very large [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) values or very small fractions. [#10446](https://github.com/cockroachdb/cockroach/pull/10446) [#10559](https://github.com/cockroachdb/cockroach/pull/10559) [#10570](https://github.com/cockroachdb/cockroach/pull/10570) [#10934](https://github.com/cockroachdb/cockroach/pull/10934)
- The `pow()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) now returns an error when its arguments are too large. [#10525](https://github.com/cockroachdb/cockroach/pull/10525)
- Fixed a crash when the number of placeholders in a query doesn't match the number of arguments. [#10474](https://github.com/cockroachdb/cockroach/pull/10474)
- Improved error handling when a SQL [transaction](https://www.cockroachlabs.com/docs/v1.0/transactions) exceeds an internal deadline. [#9906](https://github.com/cockroachdb/cockroach/pull/9906)
- Fixed a panic in raft leadership transfers. [#10530](https://github.com/cockroachdb/cockroach/pull/10530)
- Fixed a leak in [`CREATE TABLE AS`](https://www.cockroachlabs.com/docs/v1.0/create-table-as) and [`CREATE VIEW`](https://www.cockroachlabs.com/docs/v1.0/create-view). [#10527](https://github.com/cockroachdb/cockroach/pull/10527)
- Fixed a panic "range lookup of meta key found only non-matching ranges". [#10583](https://github.com/cockroachdb/cockroach/pull/10583)
- The consistency checker now runs to completion instead of canceling itself before finishing. [#10625](https://github.com/cockroachdb/cockroach/pull/10625)
- Internal retries no longer cause transaction replay errors. [#10639](https://github.com/cockroachdb/cockroach/pull/10639)
- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) command no longer crashes when given a URL without a username. [#10862](https://github.com/cockroachdb/cockroach/pull/10862)
- Fixed a deadlock in `RemoveReplica`. [#10875](https://github.com/cockroachdb/cockroach/pull/10875)
- `result is ambiguous` errors are now returned in more situations. [#10703](https://github.com/cockroachdb/cockroach/pull/10703) [#11211](https://github.com/cockroachdb/cockroach/pull/11211)
- The `ALTER TABLE SPLIT AT` command will now retry internally instead of returning errors about conflicting updates. [#10728](https://github.com/cockroachdb/cockroach/pull/10728)
- Fixed a panic that could occur when a node is restarted after an unclean shutdown. [#10690](https://github.com/cockroachdb/cockroach/pull/10690)
- Fixed a panic when replicas are removed and re-added rapidly. [#11699](https://github.com/cockroachdb/cockroach/pull/11699)
- The `sign()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) with a [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) argument now returns a decimal rather than a float. [#10954](https://github.com/cockroachdb/cockroach/pull/10954)
- Fixed a panic in the raft tick loop. [#11622](https://github.com/cockroachdb/cockroach/pull/11622)
- The `LIMIT` operator now works correctly in [`INSERT ... SELECT`](https://www.cockroachlabs.com/docs/v1.0/insert). [#11632](https://github.com/cockroachdb/cockroach/pull/11632)

Doc Updates

- -- Expanded the [cloud deployment](https://www.cockroachlabs.com/docs/v1.0/cloud-deployment) documentation to cover Microsoft Azure and secure deployment on all featured platforms: - - [AWS](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-aws) - - [Azure](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-microsoft-azure) - - [Digital Ocean](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-digital-ocean) - - [GCE](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-google-cloud-platform) -- Added interactive asciicasts to getting started tutorials. -- Added tutorials that demonstrate core features of CockroachDB: [#834](https://github.com/cockroachdb/docs/pull/834) [#878](https://github.com/cockroachdb/docs/pull/878) [#899](https://github.com/cockroachdb/docs/pull/899) - - [Data Replication](https://www.cockroachlabs.com/docs/v1.0/demo-data-replication) - - [Fault Tolerance & Recovery](https://www.cockroachlabs.com/docs/v1.0/demo-fault-tolerance-and-recovery) - - [Automatic Rebalancing](https://www.cockroachlabs.com/docs/v1.0/demo-automatic-rebalancing) -- Added documentation on user management and password-based authentication: [#838](https://github.com/cockroachdb/docs/pull/838) - - [`CREATE USER`](https://www.cockroachlabs.com/docs/v1.0/create-user) statement - - [`cockroach user`](https://www.cockroachlabs.com/docs/v1.0/create-and-manage-users) command -- Added documentation on the [`information_schema`](https://www.cockroachlabs.com/docs/v1.0/information-schema) built-in database, which provides introspection into database tables, columns, indexes, and views, and which is required for ORM compatibility. [#859](https://github.com/cockroachdb/docs/pull/859) -- Updated [`SELECT`](https://www.cockroachlabs.com/docs/v1.0/select) statement documentation to cover using "index hints". [#894](https://github.com/cockroachdb/docs/pull/894) -- Updated [`FOREIGN KEY`](https://www.cockroachlabs.com/docs/v1.0/foreign-key) constraint documentation to clarify that creating a table with a foreign key now automatically creates an index for you. [#895](https://github.com/cockroachdb/docs/pull/895) -- Updated `RENAME` statement documentation to clarify that you cannot rename a [database](https://www.cockroachlabs.com/docs/v1.0/rename-database), [table](https://www.cockroachlabs.com/docs/v1.0/rename-table), [column](https://www.cockroachlabs.com/docs/v1.0/rename-column), or [index](https://www.cockroachlabs.com/docs/v1.0/rename-index) reference by a [view](https://www.cockroachlabs.com/docs/v1.0/views). [#873](https://github.com/cockroachdb/docs/pull/873) -- Updated the [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) command documentation to cover the `--max-sql-memory` flag. [#868](https://github.com/cockroachdb/docs/pull/868) -- Updated the [`cockroach zone`](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) command documentation to reflect the correct YAML structure. [#902](https://github.com/cockroachdb/docs/pull/902) -- Fixed the Rust code samples on [Build an App](https://www.cockroachlabs.com/docs/v1.0/build-a-rust-app-with-cockroachdb). [#863](https://github.com/cockroachdb/docs/pull/863) - -
- -

Contributors

- -This release includes 292 merged PRs by 30 authors. We would like to thank the following contributors from the CockroachDB community, including first-time contributors Christian Gati, Dustin Hiatt, kiran, and Nathan Johnson. - -- Christian Gati -- Dustin Hiatt -- Haines Chan -- kiran -- Nathan Johnson -- songhao -- yznming - -
diff --git a/src/current/_includes/releases/v1.0/beta-20161208.md b/src/current/_includes/releases/v1.0/beta-20161208.md deleted file mode 100644 index 55901da2b79..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20161208.md +++ /dev/null @@ -1,56 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

SQL Language Changes

- The `pg_catalog` and [`information_schema`](https://www.cockroachlabs.com/docs/v1.0/information-schema) databases now include information about the current database only, unless the user is `root`. [#11694](https://github.com/cockroachdb/cockroach/pull/11694)
- The `pg_catalog.pg_range` table is now supported. [#11725](https://github.com/cockroachdb/cockroach/pull/11725)
- The `pg_get_userbyid` function is now supported. [#12162](https://github.com/cockroachdb/cockroach/pull/12162)
- The [`SHOW TABLES`](https://www.cockroachlabs.com/docs/v1.0/show-tables), [`SHOW DATABASES`](https://www.cockroachlabs.com/docs/v1.0/show-databases), [`SHOW GRANTS`](https://www.cockroachlabs.com/docs/v1.0/show-grants) and [`SHOW COLUMNS`](https://www.cockroachlabs.com/docs/v1.0/show-columns) statements are now aliases for queries on the [`information_schema`](https://www.cockroachlabs.com/docs/v1.0/information-schema) database. [#10196](https://github.com/cockroachdb/cockroach/pull/10196)
- Empty passwords are no longer allowed in the [`CREATE USER WITH PASSWORD`](https://www.cockroachlabs.com/docs/v1.0/create-user) statement. [#11781](https://github.com/cockroachdb/cockroach/pull/11781)
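A minimal sketch of the statements above (the database, table, and user names are hypothetical, not taken from this release):

```sql
-- SHOW TABLES and related SHOW statements are now implemented as
-- queries over information_schema.
SHOW TABLES FROM bank;

-- Empty passwords are rejected; a non-empty password must be supplied.
CREATE USER maxroach WITH PASSWORD 'xK7#qpLm';
```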

Command-Line Interface Changes

- Log file names have changed. The new format is `cockroach.kenabook.kena.2016-11-28T20_00_35Z.009524.ERROR.log`. [#11666](https://github.com/cockroachdb/cockroach/pull/11666)

Admin UI Changes

- -- On the **Cluster Overview** page, added a **Summary** section showing total nodes and capacity used, and updated the overall navigation and style. [#11696](https://github.com/cockroachdb/cockroach/pull/11696) [#11754](https://github.com/cockroachdb/cockroach/pull/11754) -- Several "allocator" metrics have been replaced with a new "underreplicated range" metric. [#11733](https://github.com/cockroachdb/cockroach/pull/11733) [#11983](https://github.com/cockroachdb/cockroach/pull/11983) -- The "ranges available" metric has been replaced with a "ranges unavailable" metric, which is computed differently. [#11760](https://github.com/cockroachdb/cockroach/pull/11760) -- New metrics have been added for the replication queue and replica GC queue. [#11753](https://github.com/cockroachdb/cockroach/pull/11753) [#11785](https://github.com/cockroachdb/cockroach/pull/11785) -- New metrics have been added for RPCs. [#11711](https://github.com/cockroachdb/cockroach/pull/11711) -- New metrics have been added to track commands that have been stuck for a long time. [#12106](https://github.com/cockroachdb/cockroach/pull/12106) - -

Performance Improvements

- -- Range leases are now balanced across the nodes in a cluster. [#11757](https://github.com/cockroachdb/cockroach/pull/11757) -- Raft heartbeats are once again coalesced into one message per pair of nodes instead of per range. [#11757](https://github.com/cockroachdb/cockroach/pull/11757) -- Persisting the applied index is now faster. [#9993](https://github.com/cockroachdb/cockroach/pull/9993) -- The replication queue no longer tries to reprocess the same range repeatedly. [#11982](https://github.com/cockroachdb/cockroach/pull/11982) -- Limited frequent reprocessing of ranges for time series maintenance and consistency checks. [#10665](https://github.com/cockroachdb/cockroach/pull/10665) -- The time series maintenance queue no longer sends commands when it has nothing to do. [#11762](https://github.com/cockroachdb/cockroach/pull/11762) -- The `pow()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) is now faster when small numbers are raised to very large powers. [#11738](https://github.com/cockroachdb/cockroach/pull/11738) -- `JOIN` queries now only scan the columns they need. [#11736](https://github.com/cockroachdb/cockroach/pull/11736) -- Micro-optimized logging and removed some noisy log events. [#11778](https://github.com/cockroachdb/cockroach/pull/11778) - -

Bug Fixes

- -- Fixed a panic when sending a raft message fails. [#11985](https://github.com/cockroachdb/cockroach/pull/11985) -- Fixed a memory spike that could occur during asymmetric partitions. [#12100](https://github.com/cockroachdb/cockroach/pull/12100) -- [`DROP DATABASE`](https://www.cockroachlabs.com/docs/v1.0/drop-database) now works when foreign key constraints are present. [#12036](https://github.com/cockroachdb/cockroach/pull/12036) -- [`DROP DATABASE`](https://www.cockroachlabs.com/docs/v1.0/drop-database) no longer hangs when subqueries have been used. [#11730](https://github.com/cockroachdb/cockroach/pull/11730) -- Requests no longer get stuck forever after a timeout. [#12000](https://github.com/cockroachdb/cockroach/pull/12000) -- Comparisons of SQL tuples now work for all types. [#10475](https://github.com/cockroachdb/cockroach/pull/10475) - -
- -

Contributors

- -This release includes 101 merged PRs by 19 authors. We would like to thank the following contributors from the CockroachDB community. - -- songhao -- yznming - -
diff --git a/src/current/_includes/releases/v1.0/beta-20161215.md b/src/current/_includes/releases/v1.0/beta-20161215.md deleted file mode 100644 index d366e0049b4..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20161215.md +++ /dev/null @@ -1,40 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

General Changes

- -- CockroachDB now uses RocksDB version 4.11.2. [#9616](https://github.com/cockroachdb/cockroach/pull/9616) - -

SQL Language Changes

- Additional support for standard database introspection required by popular ORMs. These features apply to built-in tables only; eventually, they will be made available for user-generated tables as well.
  - The `array_agg` [aggregate function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions) is now supported for ints and strings. [#12186](https://github.com/cockroachdb/cockroach/pull/12186)
  - The `ANY`, `SOME` and `ALL` array operators are now supported. [#12102](https://github.com/cockroachdb/cockroach/pull/12102)
  - `GROUP BY` can now be used with columns of type `ARRAY`. [#12198](https://github.com/cockroachdb/cockroach/pull/12198)
- The `SET` command now recognizes the `search_path`, `client_encoding`, `standard_conforming_strings`, and `client_min_messages` variables for compatibility with PostgreSQL. Setting these variables currently has no effect. [#12149](https://github.com/cockroachdb/cockroach/pull/12149)
- The `SHOW server_version` command now returns the version of PostgreSQL that CockroachDB most closely resembles. [#12149](https://github.com/cockroachdb/cockroach/pull/12149)
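A rough sketch of the compatibility features above; since they apply to built-in tables only, the `array_agg` example targets `information_schema`, and the catalog column name and identifier casing are assumptions rather than values documented in this note:

```sql
-- Aggregate strings from a built-in table.
SELECT array_agg(table_name) FROM information_schema.tables;

-- Recognized for PostgreSQL compatibility; currently no-ops.
SET search_path = pg_catalog;
SET client_encoding = 'UTF8';

SHOW server_version;
```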

Admin UI Changes

- -- Aggregated rates are now computed correctly. [#12200](https://github.com/cockroachdb/cockroach/pull/12200) -- Various stylistic improvements. [#12118](https://github.com/cockroachdb/cockroach/pull/12118) [#12152](https://github.com/cockroachdb/cockroach/pull/12152) -- Metrics have been added for lease-related operations. [#12205](https://github.com/cockroachdb/cockroach/pull/12205) - -

Performance Improvements

- -- Raft snapshots are now applied one at a time. [#12192](https://github.com/cockroachdb/cockroach/pull/12192) - -

Bug Fixes

- -- The repair system now detects unresponsive nodes more accurately in the event of asymmetric partitions. [#12178](https://github.com/cockroachdb/cockroach/pull/12178) -- String arrays are now quoted correctly on the wire. [#12268](https://github.com/cockroachdb/cockroach/pull/12268) - -

Doc Updates

- -- Each SQL [constraint](https://www.cockroachlabs.com/docs/v1.0/constraints) now has a dedicated page of documentation. [#901](https://github.com/cockroachdb/docs/pull/901) -- The [`cockroach zone`](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) documentation now offers minimum and maximum recommendations for the `ttlseconds` setting, which defines the number of seconds overwritten values are retained before garbage collection. [#912](https://github.com/cockroachdb/docs/pull/912) - -

Contributors

- -This release includes 62 merged PRs by 18 authors. diff --git a/src/current/_includes/releases/v1.0/beta-20170105.md b/src/current/_includes/releases/v1.0/beta-20170105.md deleted file mode 100644 index 4cfb3955c85..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170105.md +++ /dev/null @@ -1,84 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

General Changes

- -- CockroachDB now uses a more recent version of GRPC. [#9697](https://github.com/cockroachdb/cockroach/pull/9697) - -

SQL Language Changes

- `FILTER` clauses for [aggregate functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions) are now supported. See the [`SELECT`](https://www.cockroachlabs.com/docs/v1.0/select#filter-columns-fed-into-aggregate-functions) documentation for examples. [#10146](https://github.com/cockroachdb/cockroach/pull/10146)
- Columns can now be created with collated string types, although these columns cannot yet be indexed or part of the primary key. [#12294](https://github.com/cockroachdb/cockroach/pull/12294)
- `INTERVAL` values can now be [added](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#operators) to `DATE` values. [#12428](https://github.com/cockroachdb/cockroach/pull/12428)
- `TIMESTAMP` values can now be [compared](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#operators) to `DATE` values. [#12431](https://github.com/cockroachdb/cockroach/pull/12431)
- `DATE` literals in [views](https://www.cockroachlabs.com/docs/v1.0/views) now work correctly. [#12450](https://github.com/cockroachdb/cockroach/pull/12450)
- Parsing of [`INTERVAL`](https://www.cockroachlabs.com/docs/v1.0/interval) values is now more consistent with PostgreSQL. [#12559](https://github.com/cockroachdb/cockroach/pull/12559) [#12566](https://github.com/cockroachdb/cockroach/pull/12566)
- `ORDER BY` now works correctly when sorting by the result of an [aggregate function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions). [#12185](https://github.com/cockroachdb/cockroach/pull/12185)
- `ORDER BY` now returns an error in more cases when its arguments are ambiguous. [#12255](https://github.com/cockroachdb/cockroach/pull/12255)
- The `experimental_unique_bytes()` function has been removed. [#12228](https://github.com/cockroachdb/cockroach/pull/12228)
- The `extract()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) now supports `DATE` values. As a consequence, `extract()` no longer supports `STRING` values, which must now be explicitly cast to `TIMESTAMP` or `DATE`. [#12479](https://github.com/cockroachdb/cockroach/pull/12479)
- The `pg_catalog.pg_indexes` table now includes the `oid` column. [#12359](https://github.com/cockroachdb/cockroach/pull/12359)
- The `pg_catalog.pg_get_indexdef` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) is now supported. [#12359](https://github.com/cockroachdb/cockroach/pull/12359)
- The `array_agg()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) now handles `NULL` values correctly. [#12534](https://github.com/cockroachdb/cockroach/pull/12534)
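A few illustrative statements for the changes above (the table and column names are hypothetical):

```sql
-- FILTER on an aggregate function.
SELECT count(*) FILTER (WHERE amount > 100) FROM payments;

-- INTERVAL + DATE arithmetic and TIMESTAMP/DATE comparison.
SELECT '2017-01-05'::DATE + '1 day'::INTERVAL;
SELECT now() > '2017-01-05'::DATE;
```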

Admin UI Changes

- -- Metrics for the number of live nodes and the number of Raft commands are now reported. [#12296](https://github.com/cockroachdb/cockroach/pull/12296) -- Improved the time selector. [#12360](https://github.com/cockroachdb/cockroach/pull/12360) -- Added new summary statistics. [#12486](https://github.com/cockroachdb/cockroach/pull/12486) -- Graph axes have been improved. [#12427](https://github.com/cockroachdb/cockroach/pull/12427) -- The event list has been updated. [#12638](https://github.com/cockroachdb/cockroach/pull/12638) - -

Performance Improvements

- When a node stops responding to heartbeats, non-heartbeat Raft traffic to that node is suspended until it becomes responsive again. [#12631](https://github.com/cockroachdb/cockroach/pull/12631)
- Improved logic for transferring Raft leadership to coincide with the lease holder. [#12323](https://github.com/cockroachdb/cockroach/pull/12323)
- Fixed a thundering herd problem after a node goes down. [#12352](https://github.com/cockroachdb/cockroach/pull/12352)
- Prevented unnecessary retries of Raft commands. [#12487](https://github.com/cockroachdb/cockroach/pull/12487)
- Improved lease rebalancing in deployments with multiple stores per node. [#12565](https://github.com/cockroachdb/cockroach/pull/12565)
- Reduced the number of `cgo` calls made to RocksDB. [#12343](https://github.com/cockroachdb/cockroach/pull/12343)

Bug Fixes

- -- Commands are now canceled correctly if the replica tracking them is removed while they are pending. [#12313](https://github.com/cockroachdb/cockroach/pull/12313) -- Raft groups are always initialized when handling messages. [#12465](https://github.com/cockroachdb/cockroach/pull/12465) -- Improved error handling for invalid UTF-8 strings. [#12194](https://github.com/cockroachdb/cockroach/pull/12194) -- Fixed several bugs in `FULL` and `RIGHT OUTER JOIN`. [#12364](https://github.com/cockroachdb/cockroach/pull/12364) -- Fixed a regression in `OUTER JOIN` operations with predicates. [#12350](https://github.com/cockroachdb/cockroach/pull/12350) -- Fixed type checking in `window` definitions. [#12483](https://github.com/cockroachdb/cockroach/pull/12483) -- Fixed a bug when adding columns with families to tables created by certain older beta versions of CockroachDB. [#12475](https://github.com/cockroachdb/cockroach/pull/12475) -- It is now possible to disable logging output to files with `--log-dir=`. [#12662](https://github.com/cockroachdb/cockroach/pull/12662) -- When logging to both `stderr` and files, all desired logging output is now properly emitted to files, not only to `stderr`. [#12662](https://github.com/cockroachdb/cockroach/pull/12662) - -

Internal Changes

- -- A framework has been introduced for migrating existing clusters to new definitions for internal tables. [#11658](https://github.com/cockroachdb/cockroach/pull/11658) - -

Doc Updates

- -- Added a tutorial and sample configs for [Monitoring CockroachDB with Prometheus, Grafana, and Alertmanager](https://www.cockroachlabs.com/docs/v1.0/monitor-cockroachdb-with-prometheus) [#928](https://github.com/cockroachdb/docs/pull/928), [#12288](https://github.com/cockroachdb/cockroach/pull/12288) -- Updated the [Orchestrate CockroachDB with Kubernetes](https://www.cockroachlabs.com/docs/v1.0/orchestrate-cockroachdb-with-kubernetes) tutorial and sample configs for Kubernetes 1.5 [#940](https://github.com/cockroachdb/docs/pull/940) [#12536](https://github.com/cockroachdb/cockroach/pull/12536) -- Descriptions have been added to all [built-in functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators). [#12533](https://github.com/cockroachdb/cockroach/pull/12533) -- Added an interactive asciicast to the [Build a Python App with CockroachDB](https://www.cockroachlabs.com/docs/v1.0/build-a-python-app-with-cockroachdb) tutorial. [#937](https://github.com/cockroachdb/docs/pull/937) -- Added documentation on each [`ALTER TABLE`](https://www.cockroachlabs.com/docs/v1.0/alter-table) sub-statement: [#950](https://github.com/cockroachdb/docs/pull/950) - - [`ADD COLUMN`](https://www.cockroachlabs.com/docs/v1.0/add-column) - - [`ADD CONSTRAINT`](https://www.cockroachlabs.com/docs/v1.0/add-constraint) - - [`ALTER COLUMN`](https://www.cockroachlabs.com/docs/v1.0/alter-column) - - [`DROP COLUMN`](https://www.cockroachlabs.com/docs/v1.0/drop-column) - - [`DROP CONSTRAINT`](https://www.cockroachlabs.com/docs/v1.0/drop-constraint) - - [`RENAME COLUMN`](https://www.cockroachlabs.com/docs/v1.0/rename-column) - - [`RENAME TABLE`](https://www.cockroachlabs.com/docs/v1.0/rename-table) -- Added documentation on the [`SHOW USERS`](https://www.cockroachlabs.com/docs/v1.0/show-users) statement. [#939](https://github.com/cockroachdb/docs/pull/939) -- Clarified that [password-based authentication](https://www.cockroachlabs.com/docs/v1.0/create-and-manage-users#user-authentication) cannot be used for the `root` user. [#938](https://github.com/cockroachdb/docs/pull/938) - -
- -

Contributors

- -This release includes 122 merged PRs by 25 authors. We would like to thank the following contributors from the CockroachDB community: - -- Haines Chan - -
diff --git a/src/current/_includes/releases/v1.0/beta-20170112.md b/src/current/_includes/releases/v1.0/beta-20170112.md deleted file mode 100644 index 19941df6f78..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170112.md +++ /dev/null @@ -1,60 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

Upgrade Notes

- -- This release cannot be run concurrently with older beta releases. Please stop all nodes running older releases before restarting any node with this version. - -

SQL Language Changes

- The `ALTER TABLE VALIDATE CONSTRAINT` statement can now be used to validate foreign keys in the `UNVALIDATED` state. [#12682](https://github.com/cockroachdb/cockroach/pull/12682)
- [`ALTER TABLE DROP COLUMN`](https://www.cockroachlabs.com/docs/v1.0/drop-column) can now drop columns that are involved in indexes. If the index includes columns other than the one being dropped, the `CASCADE` modifier is required. [#12782](https://github.com/cockroachdb/cockroach/pull/12782)
- Leading zeros in numeric literals are now ignored instead of causing the number to be parsed as octal. [#12803](https://github.com/cockroachdb/cockroach/pull/12803)
- Collated strings can now be cast to the non-collated string type. [#12820](https://github.com/cockroachdb/cockroach/pull/12820)
- Arithmetic with [aggregate functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions) no longer crashes when the input table is empty. [#12733](https://github.com/cockroachdb/cockroach/pull/12733)
- The [`SHOW COLUMNS`](https://www.cockroachlabs.com/docs/v1.0/show-columns) statement now works for non-root users with the appropriate permissions. [#12776](https://github.com/cockroachdb/cockroach/pull/12776)
- The `information_schema.statistics.NON_UNIQUE` field is now set correctly. [#12793](https://github.com/cockroachdb/cockroach/pull/12793)
- It is now possible to use `EXPLAIN` statements as an input to other queries using a new square bracket syntax: `SELECT MAX(level) FROM [EXPLAIN SELECT * FROM kv ORDER BY v]`. [#12637](https://github.com/cockroachdb/cockroach/pull/12637)
- The format of SQL error messages has been standardized. [#12401](https://github.com/cockroachdb/cockroach/pull/12401)
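A short sketch of the new syntax described above; the table, constraint, and column names are hypothetical, and the last statement is taken verbatim from the note:

```sql
-- Validate a foreign key that is currently in the UNVALIDATED state.
ALTER TABLE orders VALIDATE CONSTRAINT fk_customer;

-- Drop a column that participates in an index with other columns.
ALTER TABLE orders DROP COLUMN region CASCADE;

-- Use an EXPLAIN statement as input to another query.
SELECT MAX(level) FROM [EXPLAIN SELECT * FROM kv ORDER BY v];
```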

Command-Line Interface Changes

- -- The prototype `cockroach backup` and `cockroach restore` commands have been removed (backup and restore functionality will be provided through SQL statements instead). [#12788](https://github.com/cockroachdb/cockroach/pull/12788) - -

Admin UI Changes

- The set of graphs displayed in the UI has been changed. [#12797](https://github.com/cockroachdb/cockroach/pull/12797)
- The node list and cluster pages have been updated. [#12643](https://github.com/cockroachdb/cockroach/pull/12643) [#12719](https://github.com/cockroachdb/cockroach/pull/12719)
- A new metric `raftlog.behind` is now reported to track how far behind raft processing is on a node. [#12800](https://github.com/cockroachdb/cockroach/pull/12800)

Bug Fixes

- -- Ranges can no longer be rebalanced away from their current lease holder. [#12598](https://github.com/cockroachdb/cockroach/pull/12598) -- Eliminated a source of incorrect "not lease holder" errors. [#12542](https://github.com/cockroachdb/cockroach/pull/12542) -- Reduced spammy "purgatory" log messages. [#12528](https://github.com/cockroachdb/cockroach/pull/12528) - -

Performance Improvements

- -- The maximum allowable clock offset has been increased from 250ms to 500ms. [#12821](https://github.com/cockroachdb/cockroach/pull/12821) -- Replica destruction is now much faster. [#12745](https://github.com/cockroachdb/cockroach/pull/12745) -- Raft snapshots and replica destruction no longer run concurrently. [#12737](https://github.com/cockroachdb/cockroach/pull/12737) -- Raft snapshots are now managed by a new internal queue. [#12686](https://github.com/cockroachdb/cockroach/pull/12686) -- Raft logs are now garbage collected more aggressively. [#12686](https://github.com/cockroachdb/cockroach/pull/12686) -- Replicas that are behind are no longer considered targets for lease rebalancing. [#12736](https://github.com/cockroachdb/cockroach/pull/12736) -- Tuned some RocksDB parameters. [#12677](https://github.com/cockroachdb/cockroach/pull/12677) - -

Doc Updates

- -- Added a Java code sample for transaction retry logic to the [Build a Java App with CockroachDB](https://www.cockroachlabs.com/docs/v1.0/build-a-java-app-with-cockroachdb) tutorial. [#987](https://github.com/cockroachdb/docs/pull/987) -- Added documentation on [SQL type conversions](https://www.cockroachlabs.com/docs/v1.0/data-types#data-type-conversions-casts). [#977](https://github.com/cockroachdb/docs/pull/977) - -
- -

Contributors

- -This release includes 60 merged PRs by 22 authors. We would like to thank the following contributors from the CockroachDB community: - -- songhao - -
diff --git a/src/current/_includes/releases/v1.0/beta-20170126.md b/src/current/_includes/releases/v1.0/beta-20170126.md deleted file mode 100644 index d71b4c047da..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170126.md +++ /dev/null @@ -1,83 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

License Change

- -- CockroachDB is now covered by the [CockroachDB Community License](https://github.com/cockroachdb/cockroach/blob/master/LICENSE). A pure open-source (APL2) version can be obtained by building from source with the `make buildoss` build target. See our [blog post](https://www.cockroachlabs.com/blog/how-were-building-a-business-to-last/) for more details. - -

Backwards-Incompatible Changes

- TLS 1.2 is now required to communicate with a CockroachDB server in secure mode. [#12898](https://github.com/cockroachdb/cockroach/pull/12898)
- The hex string syntax `X'0d0a'` now produces [byte values](https://www.cockroachlabs.com/docs/v1.0/sql-constants#hexadecimal-encoded-byte-array-literals) instead of string values, so it can accept non-UTF-8 input. [#13094](https://github.com/cockroachdb/cockroach/pull/13094)
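For example, under the new behavior a hex literal yields bytes rather than a string (a minimal sketch based on the note above):

```sql
-- Evaluates to a BYTES value containing a carriage return and a line feed.
SELECT X'0d0a';
```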

Build Changes

- CockroachDB now uses Go 1.7.4. [#12881](https://github.com/cockroachdb/cockroach/pull/12881)

New Features

- -- The location of replicas, both when first added and when rebalanced to maintain cluster equilibrium, is now influenced by [`--locality`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) settings at the node-level and more [expressive zone configuration constraints](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones#replication-constraints). [#13119](https://github.com/cockroachdb/cockroach/pull/13119) -- Old log files are now garbage collected automatically. [#13083](https://github.com/cockroachdb/cockroach/pull/13083) - -

SQL Language Changes

- The `information_schema.statistics` table and the output of [`SHOW INDEXES`](https://www.cockroachlabs.com/docs/v1.0/show-index) now include columns that are added to the index implicitly (the primary key is an implicit part of secondary indexes). These columns are marked by a new `Implicit` column. [#12801](https://github.com/cockroachdb/cockroach/pull/12801)
- The `COPY` statement is now more compatible with PostgreSQL. [#12723](https://github.com/cockroachdb/cockroach/pull/12723)
- CockroachDB now advertises the `integer_datetimes` parameter, improving compatibility with JDBC. [#12842](https://github.com/cockroachdb/cockroach/pull/12842)
- The `sum()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) now works with [`INTEGER`](https://www.cockroachlabs.com/docs/v1.0/int) values. [#12938](https://github.com/cockroachdb/cockroach/pull/12938)
- The `format_type()` and `col_description()` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) are now supported. [#12967](https://github.com/cockroachdb/cockroach/pull/12967) [#12978](https://github.com/cockroachdb/cockroach/pull/12978)
- The `pg_catalog.pg_enum` table now exists (although it is always empty because we do not yet support enum types). [#12972](https://github.com/cockroachdb/cockroach/pull/12972)
- The `pg_catalog.pg_collation` table is now supported, along with the `pg_attribute.attcollation` and `pg_type.typcollation` columns. [#12983](https://github.com/cockroachdb/cockroach/pull/12983)
- The `OID` and `NAME` types are now supported (and used in `pg_catalog`) for compatibility with PostgreSQL. [#12641](https://github.com/cockroachdb/cockroach/pull/12641)
- [`SHOW COLUMNS`](https://www.cockroachlabs.com/docs/v1.0/show-columns) now includes a summary of indices that the column is a part of. [#12907](https://github.com/cockroachdb/cockroach/pull/12907)
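A couple of quick illustrations of the items above (the table and column names are hypothetical):

```sql
SHOW COLUMNS FROM accounts;        -- now lists the indices each column belongs to
SELECT sum(balance) FROM accounts; -- sum() now accepts INTEGER values
```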

Command-Line Interface Changes

- The [`cockroach dump`](https://www.cockroachlabs.com/docs/v1.0/sql-dump) command can now dump multiple tables at the same time. [#12759](https://github.com/cockroachdb/cockroach/pull/12759)
- The [`cockroach dump`](https://www.cockroachlabs.com/docs/v1.0/sql-dump) command has a new flag, `--dump-mode`, which can be used to dump the schema or the data for a table separately. [#12759](https://github.com/cockroachdb/cockroach/pull/12759)
- Timestamps are now displayed in a format which is accepted by the SQL parser. [#12886](https://github.com/cockroachdb/cockroach/pull/12886)
- The `--pretty` flag has been removed from commands that never used it. [#12900](https://github.com/cockroachdb/cockroach/pull/12900)
- An error is raised when too many positional command-line arguments are provided. [#12901](https://github.com/cockroachdb/cockroach/pull/12901)
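A hypothetical invocation of the expanded `cockroach dump` command; the database and table names, and the `--dump-mode` value, are assumptions rather than values documented in this note:

```shell
# Dump only the schema of two tables from the same database.
cockroach dump bank accounts users --dump-mode=schema > bank_schema.sql
```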

Admin UI Changes

- -- When the cursor is over a graph, a vertical line is shown to mark the same time on all graphs. [#12645](https://github.com/cockroachdb/cockroach/pull/12645) - -

Performance Improvements

- -- Table leases are now released when a node shuts down cleanly. [#12501](https://github.com/cockroachdb/cockroach/pull/12501) -- Miscellaneous query optimizations. [#12617](https://github.com/cockroachdb/cockroach/pull/12617) -- The gossip network reaches equilibrium faster in larger clusters. [#12880](https://github.com/cockroachdb/cockroach/pull/12880) [#12920](https://github.com/cockroachdb/cockroach/pull/12920) [#12974](https://github.com/cockroachdb/cockroach/pull/12974) -- When a node is shutting down, SQL connections are closed when they become idle. [#12952](https://github.com/cockroachdb/cockroach/pull/12952) -- Nodes now refuse to accept new ranges if they are falling behind on the ranges they already have. [#13043](https://github.com/cockroachdb/cockroach/pull/13043) - -

Bug Fixes

- -- [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements using a secondary index no longer cause server crashes during schema changes. [#12794](https://github.com/cockroachdb/cockroach/pull/12794) -- Improved accuracy and performance of node liveness heartbeats. [#12864](https://github.com/cockroachdb/cockroach/pull/12864) -- Reduced the occurrence of "ambiguous result" errors. [#12557](https://github.com/cockroachdb/cockroach/pull/12557) -- The server no longer ignores signals during initialization. [#12743](https://github.com/cockroachdb/cockroach/pull/12743) -- It is now possible to drop a table with a self-referential foreign key without the `CASCADE` modifier. [#12958](https://github.com/cockroachdb/cockroach/pull/12958) -- Additional data consistency checks have been temporarily enabled. [#12994](https://github.com/cockroachdb/cockroach/pull/12994) -- Fixed a crash when retryable errors are returned inside subqueries. [#13028](https://github.com/cockroachdb/cockroach/pull/13028) -- Node ID allocation is now retried if it fails when a node first starts. [#13107](https://github.com/cockroachdb/cockroach/pull/13107) - -

Doc Updates

- -- Added documentation on [SQL Value Expressions](https://www.cockroachlabs.com/docs/v1.0/sql-expressions), [Table Expressions](https://www.cockroachlabs.com/docs/v1.0/table-expressions), and [Constants](https://www.cockroachlabs.com/docs/v1.0/sql-constants). [#1008](https://github.com/cockroachdb/docs/pull/1008) -- Added documentation on [String Collation](https://www.cockroachlabs.com/docs/v1.0/collate). [#974](https://github.com/cockroachdb/docs/pull/974) -- Update the [`cockroach dump`](https://www.cockroachlabs.com/docs/v1.0/sql-dump) documentation to cover dumping multiple tables as well as choosing whether to dump a table's schema, data, or both. [#1010](https://github.com/cockroachdb/docs/pull/1010) -- Updated the [`cockroach zone`](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) documentation to explain how node-level locality settings and zone configuration constraints influence the location of replicas, and added [scenario-based examples](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones#scenario-based-examples). [#1027](https://github.com/cockroachdb/docs/pull/1027) -- Updated cluster topology guidance in [Recommended Production Settings](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings#cluster-topology). [#1027](https://github.com/cockroachdb/docs/pull/1027) - -
- -

Contributors

- -This release includes 115 merged PRs by 25 authors. We would like to thank the following contributors from the CockroachDB community: - -- DiSiqueira -- Jason E. Aten - -
diff --git a/src/current/_includes/releases/v1.0/beta-20170209.md b/src/current/_includes/releases/v1.0/beta-20170209.md deleted file mode 100644 index 0593201f7b5..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170209.md +++ /dev/null @@ -1,66 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

Build Changes

- -- Official binaries for Linux now support older Linux distributions, including RHEL 6. [#13102](https://github.com/cockroachdb/cockroach/pull/13102) -- CockroachDB now uses RocksDB 5.0.2. [#12913](https://github.com/cockroachdb/cockroach/pull/12913) - -

SQL Language Changes

- The `unnest()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) is now supported. [#13154](https://github.com/cockroachdb/cockroach/pull/13154)
- It is now possible to join a set-returning [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) like `generate_series()` with itself. [#13140](https://github.com/cockroachdb/cockroach/pull/13140)
- The timestamp parser now accepts an additional format. [#13159](https://github.com/cockroachdb/cockroach/pull/13159)
- The output of [`EXPLAIN`](https://www.cockroachlabs.com/docs/v1.0/explain) has been improved. [#13216](https://github.com/cockroachdb/cockroach/pull/13216) [#13221](https://github.com/cockroachdb/cockroach/pull/13221)
- Hex string literals (`X''`) can now contain byte sequences that are not valid UTF-8. [#13287](https://github.com/cockroachdb/cockroach/pull/13287)
- `COPY` now works outside transactions. [#13400](https://github.com/cockroachdb/cockroach/pull/13400)
- Fixed some edge cases with the `IN` [operator](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#operators). [#13444](https://github.com/cockroachdb/cockroach/pull/13444)
- The `pg_type` table now contains entries with the same names as PostgreSQL, such as `int2`, `int4`, and `int8` instead of just `integer`. [#13355](https://github.com/cockroachdb/cockroach/pull/13355)
- The `pg_backend_pid`, `obj_description`, and `shobj_description` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) and the `pg_inherits`, `pg_foreign_table`, and `pg_foreign_server` tables now have placeholder implementations for compatibility with some clients. [#13097](https://github.com/cockroachdb/cockroach/pull/13097) [#13181](https://github.com/cockroachdb/cockroach/pull/13181)
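Illustrative queries for the set-returning function changes above; the array literal passed to `unnest()` is an assumption, since the changelog only states that the function exists:

```sql
-- A set-returning function joined with itself.
SELECT * FROM generate_series(1, 3) AS a, generate_series(1, 3) AS b;

-- unnest() expands an array into rows.
SELECT unnest(ARRAY[1, 2, 3]);
```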

Command-Line Interface Changes

- The `--pretty` flag of [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) has been removed, and replaced with a `--format` flag that supports multiple output formats. The command `set display_format` can be used to change the format during a session. [#13171](https://github.com/cockroachdb/cockroach/pull/13171)
- The current database name is now shown in the prompt. [#12747](https://github.com/cockroachdb/cockroach/pull/12747) [#13379](https://github.com/cockroachdb/cockroach/pull/13379)
- Non-interactive usage of [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) is much faster when the input file contains very long lines. [#13174](https://github.com/cockroachdb/cockroach/pull/13174)
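A hypothetical invocation using the new flag (the `csv` value and the query are illustrative assumptions):

```shell
# Pick an output format up front; the "set display_format" client command
# described above can change it later within an interactive session.
cockroach sql --format=csv -e 'SELECT 1'
```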

Admin UI Changes

- -- Introduced a new overview dashboard. [#13348](https://github.com/cockroachdb/cockroach/pull/13348) -- Improved accuracy of histogram data. [#13106](https://github.com/cockroachdb/cockroach/pull/13106) - -

Performance Improvements

- -- All columns in newly-created tables are placed in the same [column family](https://www.cockroachlabs.com/docs/v1.0/column-families) by default. [#13198](https://github.com/cockroachdb/cockroach/pull/13198) -- Scans with a `LIMIT` no longer interfere with writes to rows that match the `WHERE` clause but were not returned due to the `LIMIT`. [#13349](https://github.com/cockroachdb/cockroach/pull/13349) -- Performance is more stable during splits. [#13208](https://github.com/cockroachdb/cockroach/pull/13208) -- Reduced unnecessary disk access in read-only transactions. [#13367](https://github.com/cockroachdb/cockroach/pull/13367) -- Reduced log spam. [#13388](https://github.com/cockroachdb/cockroach/pull/13388) - -

Bug Fixes

- -- Nodes now retry at startup if they are unable to allocate a node ID. [#13148](https://github.com/cockroachdb/cockroach/pull/13148) -- Improved gossiping of system configuration data. [#13167](https://github.com/cockroachdb/cockroach/pull/13167) - -

Doc Updates

- -- Added language-specific tutorials on [building an app with CockroachDB](https://www.cockroachlabs.com/docs/v1.0/build-an-app-with-cockroachdb). - - The [Go tutorial](https://www.cockroachlabs.com/docs/v1.0/build-a-go-app-with-cockroachdb) now covers using the Go pq driver or the GORM ORM. [#1069](https://github.com/cockroachdb/docs/pull/1069) - - The [Python tutorial](https://www.cockroachlabs.com/docs/v1.0/build-a-python-app-with-cockroachdb) now covers using the psycopg2 driver or the SQLAlchemy ORM. [#1079](https://github.com/cockroachdb/docs/pull/1079) - - The [Ruby tutorial](https://www.cockroachlabs.com/docs/v1.0/build-a-ruby-app-with-cockroachdb) now provides a code sample demonstrating transaction retry logic. [#1068](https://github.com/cockroachdb/docs/pull/1068) -- New documentation on the current methods of [importing data into CockroachDB](https://www.cockroachlabs.com/docs/v1.0/import-data). [#1076](https://github.com/cockroachdb/docs/pull/1076) -- New documentation on how [name resolution](https://www.cockroachlabs.com/docs/v1.0/sql-name-resolution) works in CockroachDB. [#1050](https://github.com/cockroachdb/docs/pull/1050) -- New documentation on using the [`EXPLAIN`](https://www.cockroachlabs.com/docs/v1.0/explain) statement to analyze the query plan of `DELETE`, `INSERT`, `SELECT`, or `UPDATE` statements. [#1073](https://github.com/cockroachdb/docs/pull/1073) -- New FAQ on [how CockroachDB is both highly available and strongly consistent without violating the CAP theorem](https://www.cockroachlabs.com/docs/v1.0/frequently-asked-questions#how-is-cockroachdb-both-highly-available-and-strongly-consistent). [#1061](https://github.com/cockroachdb/docs/pull/1061) -- Expanded documentation on [simple `CASE` expressions](https://www.cockroachlabs.com/docs/v1.0/sql-expressions#simple-case-expressions) and [searched `CASE` expressions](https://www.cockroachlabs.com/docs/v1.0/sql-expressions#searched-case-expressions). [#1036](https://github.com/cockroachdb/docs/pull/1036) - -
- -

Contributors

- -This release includes 132 merged PRs by 24 authors. We would like to thank the following contributor from the CockroachDB community: - -- Panos Mamatsis - -
diff --git a/src/current/_includes/releases/v1.0/beta-20170216.md b/src/current/_includes/releases/v1.0/beta-20170216.md deleted file mode 100644 index 0370d66c5ba..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170216.md +++ /dev/null @@ -1,39 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

SQL Language Changes

- The [search path](https://www.cockroachlabs.com/docs/v1.0/sql-name-resolution#search-path) for table names can now be set with `SET SEARCH_PATH`. [#13313](https://github.com/cockroachdb/cockroach/pull/13313)
- All built-in [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) are now in the global namespace (instead of `pg_catalog`). [#13404](https://github.com/cockroachdb/cockroach/pull/13404)
- The `pg_extension` table and the `current_database` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) are now supported. The `pg_type` and `pg_namespace` tables have had some changes to improve compatibility with ActiveRecord. [#13429](https://github.com/cockroachdb/cockroach/pull/13429)
- The columns used in foreign keys are now tracked more accurately. [#13439](https://github.com/cockroachdb/cockroach/pull/13439)
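A small sketch of the session settings above (the `pg_catalog` value is only an example path):

```sql
SET SEARCH_PATH = pg_catalog;   -- controls resolution of unqualified table names
SELECT current_database();      -- now supported
```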

Admin UI Changes

- -- The correct fonts are now used. [#13539](https://github.com/cockroachdb/cockroach/pull/13539) - -

Bug Fixes

- -- Replicas that fall too far behind no longer get stuck in a state in which they can no longer catch up. [#13515](https://github.com/cockroachdb/cockroach/pull/13515) -- The `cockroach.LEVEL` symlinks created in the log directory now work. [#13585](https://github.com/cockroachdb/cockroach/pull/13585) - -

Performance Improvements

- -- Reduced memory allocations in various places. [#13466](https://github.com/cockroachdb/cockroach/pull/13466) [#13487](https://github.com/cockroachdb/cockroach/pull/13487) [#13455](https://github.com/cockroachdb/cockroach/pull/13455) [#13579](https://github.com/cockroachdb/cockroach/pull/13579) -- The [minimum number of file descriptors](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings) has been reduced to 1956 (1700 per store + 256 for networking) so that CockroachDB can be run more easily on platforms with a default limit of 2048 file descriptors. [#13462](https://github.com/cockroachdb/cockroach/pull/13462) -- [`UPSERT`](https://www.cockroachlabs.com/docs/v1.0/upsert) is now faster in many cases. [#13488](https://github.com/cockroachdb/cockroach/pull/13488) [#13500](https://github.com/cockroachdb/cockroach/pull/13500) - -

Doc Updates

- -- The [sample Kubernetes config](https://www.cockroachlabs.com/docs/v1.0/orchestrate-cockroachdb-with-kubernetes) is now able to recover from a situation in which all nodes are down at once. [#13580](https://github.com/cockroachdb/cockroach/pull/13580) -- The [Build a Java App with CockroachDB](https://www.cockroachlabs.com/docs/v1.0/build-a-java-app-with-cockroachdb) tutorial now covers using the Hibernate ORM in addition to the jdbc driver. [#1100](https://github.com/cockroachdb/docs/pull/1100) -- The [Start a Cluster in Docker](https://www.cockroachlabs.com/docs/v1.0/start-a-local-cluster-in-docker) tutorial now offers OS-specific instructions, with specific improvements for running on Windows. [#1095](https://github.com/cockroachdb/docs/pull/1095) - -
- -

Contributors

- -This release includes 60 merged PRs by 18 authors. We would like to thank first-time contributor Jason Chu from the CockroachDB community. - -
diff --git a/src/current/_includes/releases/v1.0/beta-20170223.md b/src/current/_includes/releases/v1.0/beta-20170223.md deleted file mode 100644 index c2e19e21615..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170223.md +++ /dev/null @@ -1,37 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

SQL Language Changes

- Fixed parsing of interval constants with precision modifiers such as `INTERVAL '5' YEARS`. [#13702](https://github.com/cockroachdb/cockroach/pull/13702)
- String literals containing `true`, `false`, or their synonyms (but not placeholders or other expressions of type string) can now be used where a `BOOL` is required. [#13685](https://github.com/cockroachdb/cockroach/pull/13685)
- The underlying implementation of the `DECIMAL` type has been replaced. This may change the results of some calculations. The default rounding behavior for `DECIMAL` values is now "half away from zero", which is consistent with PostgreSQL. [#13551](https://github.com/cockroachdb/cockroach/pull/13551)
- The column headers in the results of `SHOW` commands are now lower-case to match PostgreSQL. [#13484](https://github.com/cockroachdb/cockroach/pull/13484)
- The `pg_index.indkey` column is now an `int2vector`, improving compatibility with Sequelize. [#13484](https://github.com/cockroachdb/cockroach/pull/13484)
- Improved support for arrays (still limited to the `pg_catalog` tables). [#13636](https://github.com/cockroachdb/cockroach/pull/13636) [#13535](https://github.com/cockroachdb/cockroach/pull/13535)
- The output of `SHOW USERS` is now sorted. [#13661](https://github.com/cockroachdb/cockroach/pull/13661)
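Two illustrative statements for the parsing changes above; the table and column in the second query are hypothetical:

```sql
-- Interval constant with a precision modifier, as in the note above.
SELECT INTERVAL '5' YEARS;

-- A string literal standing in where a BOOL is required.
SELECT * FROM users WHERE is_active = 'true';
```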

Admin UI Changes

- -- A new graph displays the execution latency for SQL statements. [#12790](https://github.com/cockroachdb/cockroach/pull/12790) -- A new "All Events" page displays cluster history. [#13093](https://github.com/cockroachdb/cockroach/pull/13093) -- New metrics for round-trip time between nodes are now recorded. [#13533](https://github.com/cockroachdb/cockroach/pull/13533) -- Fixed-width fonts now correctly use Inconsolata. [#13631](https://github.com/cockroachdb/cockroach/pull/13631) - -

Bug Fixes

- Fixed a bug in error handling that could cause ranges to get stuck if an error occurred during a rebalance. [#13605](https://github.com/cockroachdb/cockroach/pull/13605)
- Table lease errors during schema changes are now retried, preventing the schema changes from getting stuck. [#13606](https://github.com/cockroachdb/cockroach/pull/13606)
- Inter-node connections are now closed on heartbeat timeouts, improving the handling of network partitions. [#13654](https://github.com/cockroachdb/cockroach/pull/13654)

Performance Improvements

- -- Metadata for queries using unqualified table names is now cached, speeding up these queries by 130%. [#13635](https://github.com/cockroachdb/cockroach/pull/13635) -- Eliminated an unnecessary sort phase in table scans, making `cockroach dump` usable on larger tables. [#13703](https://github.com/cockroachdb/cockroach/pull/13703) -- Leases and replicas will no longer be transferred to nodes that are draining. [#11724](https://github.com/cockroachdb/cockroach/pull/11724) [#13601](https://github.com/cockroachdb/cockroach/pull/13601) -- Reduced unnecessary copying and allocations. [#13660](https://github.com/cockroachdb/cockroach/pull/13660) [#13649](https://github.com/cockroachdb/cockroach/pull/13649) - -

Contributors

- -This release includes 57 merged PRs by 19 authors. diff --git a/src/current/_includes/releases/v1.0/beta-20170309.md b/src/current/_includes/releases/v1.0/beta-20170309.md deleted file mode 100644 index e50bb998396..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170309.md +++ /dev/null @@ -1,58 +0,0 @@ -

{{ include.release }}

Release Date: {{ include.release_date | date: "%B %-d, %Y" }}

General Changes

- -- CockroachDB now uses Go 1.8. [#13673](https://github.com/cockroachdb/cockroach/pull/13673) -- CockroachDB now uses RocksDB 5.1.4. [#13831](https://github.com/cockroachdb/cockroach/pull/13831) - -

SQL Language Changes

- Table names are now released as soon as the [`DROP TABLE`](https://www.cockroachlabs.com/docs/v1.0/drop-table) statement executes, instead of being held until the asynchronous cleanup finishes. [#13908](https://github.com/cockroachdb/cockroach/pull/13908)
- Most write statements now accept a `RETURNING NOTHING` clause to indicate that the query should not return a result (not even the number of rows affected). This will be used in the future to enable optimizations. [#13802](https://github.com/cockroachdb/cockroach/pull/13802)
- PostgreSQL-style duration formats are now parsed case-insensitively. [#13748](https://github.com/cockroachdb/cockroach/pull/13748)
- Tables with no columns are now handled correctly in the network protocol. [#13765](https://github.com/cockroachdb/cockroach/pull/13765)
- `COPY` no longer requires the input data to end in a newline. [#13858](https://github.com/cockroachdb/cockroach/pull/13858)
- [`SHOW CONSTRAINTS`](https://www.cockroachlabs.com/docs/v1.0/show-constraints) no longer includes constraints involving hidden columns. [#13870](https://github.com/cockroachdb/cockroach/pull/13870)
- It is no longer possible to drop system tables. [#13911](https://github.com/cockroachdb/cockroach/pull/13911)
- The `DESC` modifier can now be applied to columns in a [`PRIMARY KEY`](https://www.cockroachlabs.com/docs/v1.0/primary-key) or [`UNIQUE`](https://www.cockroachlabs.com/docs/v1.0/unique) clause during table creation. [#13924](https://github.com/cockroachdb/cockroach/pull/13924)
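A brief sketch of the new clauses above (the table and column names are hypothetical):

```sql
-- Suppress the result of a write entirely.
INSERT INTO kv (k, v) VALUES (1, 'a') RETURNING NOTHING;

-- DESC on a PRIMARY KEY column at table creation.
CREATE TABLE events (ts TIMESTAMP, id INT, PRIMARY KEY (ts DESC, id));
```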

Command-Line Interface Changes

- The [`cockroach dump`](https://www.cockroachlabs.com/docs/v1.0/sql-dump) command now takes an `--as-of` flag, which can be used to make a backup as of a particular time.
- When [starting a node](https://www.cockroachlabs.com/docs/v1.0/start-a-node), the details printed to `stdout` are now written to the node's `INFO` log as well. Also, these details now include node attributes and locality. [#1143](https://github.com/cockroachdb/docs/issues/1143), [#1144](https://github.com/cockroachdb/docs/issues/1144)
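A hypothetical use of the new flag; the database and table names and the timestamp format are assumptions, since the note does not specify them:

```shell
# Dump a table as it existed at a past time.
cockroach dump bank accounts --as-of='2017-03-01 12:00:00' > accounts_as_of.sql
```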

Bug Fixes

- Fixed (again) a bug in error handling that could cause ranges to get stuck if an error occurred during a rebalance. [#13763](https://github.com/cockroachdb/cockroach/pull/13763)
- Worked around a bug that could cause GRPC connections to get stuck in a broken state. [#13822](https://github.com/cockroachdb/cockroach/pull/13822)
- Panic stack traces are now written to the `INFO` log file by default instead of to `stderr`. [#13890](https://github.com/cockroachdb/cockroach/pull/13890)

Performance Improvements

- -- Improved performance in high-contention scenarios by introducing a queue to wait for conflicting transactions to finish. This also reduces the likelihood of transaction restarts due to write/write conflicts. [#13501](https://github.com/cockroachdb/cockroach/pull/13501) -- A new lease implementation is now used for most ranges, reducing the number of writes that must be done just to maintain a lease. [#13826](https://github.com/cockroachdb/cockroach/pull/13826) -- Reduced the occurrence of `AmbiguousResultError` by waiting for pending RPCs to complete before returning. [#13800](https://github.com/cockroachdb/cockroach/pull/13800) -- Changed some mutexes to `RWMutexes`. Removed some instrumentation of mutex timing. [#13697](https://github.com/cockroachdb/cockroach/pull/13697) -- `cockroach dump` now works in larger batches. [#13812](https://github.com/cockroachdb/cockroach/pull/13812) -- Failed snapshots no longer disrupt regular raft messages. [#13816](https://github.com/cockroachdb/cockroach/pull/13816) -- Increased flow control parameters for GRPC connections, improving performance on high-latency links. [#13840](https://github.com/cockroachdb/cockroach/pull/13840) -- Increased limits on in-flight raft messages. [#13862](https://github.com/cockroachdb/cockroach/pull/13862) -- Range leases are now proactively transferred away from draining nodes. [#13792](https://github.com/cockroachdb/cockroach/pull/13792) -- Avoid sending redundant PushTxn calls in intent resolution. [#13955](https://github.com/cockroachdb/cockroach/pull/13955) - -

Doc Updates

- -- Added an FAQ on [auto-generating unique IDs in CockroachDB](https://www.cockroachlabs.com/docs/v1.0/sql-faqs#how-do-i-auto-generate-unique-row-ids-in-cockroachdb). [#1126](https://github.com/cockroachdb/docs/pull/1126) -- Expanded guidance on [using a SQL table as an alternative to direct key-value access](https://www.cockroachlabs.com/docs/v1.0/frequently-asked-questions#can-i-use-cockroachdb-as-a-key-value-store). [#1122](https://github.com/cockroachdb/docs/pull/1122) -- Added details about [using the `BIT` type to constrain integers](https://www.cockroachlabs.com/docs/v1.0/int#size) based on their corresponding binary values. [#1116](https://github.com/cockroachdb/docs/pull/1116) -- Added details about [building a binary](https://www.cockroachlabs.com/docs/v1.0/install-cockroachdb) that excludes enterprise functionality covered by the CockroachDB Community License (CCL). [#1130](https://github.com/cockroachdb/docs/pull/1130) - -
- -

Contributors

- -This release includes 124 merged PRs by 21 authors. We would like to thank first-time contributor Dmitry Vorobev from the CockroachDB community. - -
diff --git a/src/current/_includes/releases/v1.0/beta-20170323.md b/src/current/_includes/releases/v1.0/beta-20170323.md deleted file mode 100644 index 83c9c65c0d6..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170323.md +++ /dev/null @@ -1,80 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Backwards-Incompatible Changes

- -- The `ALTER TABLE SPLIT AT` syntax has changed to support multiple splits with one command. Existing uses of `ALTER TABLE t SPLIT AT (x, y)` should be changed to `ALTER TABLE t SPLIT AT VALUES (x, y)`. [#14281](https://github.com/cockroachdb/cockroach/pull/14281) - -
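A sketch of the migration described above, assuming a hypothetical table `t` with an integer primary key:

```sql
-- Old form, no longer accepted:
--   ALTER TABLE t SPLIT AT (100);
-- New form; VALUES also allows several split points in one statement:
ALTER TABLE t SPLIT AT VALUES (100), (200), (300);
```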

Installation Changes

- -- The Homebrew formula has moved; to install CockroachDB with `brew`, use `brew install cockroachdb/cockroach/cockroach`. - -

General Changes

- -- CockroachDB now uses `jemalloc` version 4.5.0. [#14287](https://github.com/cockroachdb/cockroach/pull/14287) - -

SQL Language Changes

- -- [Collated strings](https://www.cockroachlabs.com/docs/v1.0/collate) are now fully supported and may be stored in columns. [#13609](https://github.com/cockroachdb/cockroach/pull/13609) -- The new `isnan()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) and special syntax `IS NAN` and `IS NOT NAN` can be used to detect floats that are `Not-a-Number`. [#14248](https://github.com/cockroachdb/cockroach/pull/14248) -- Variance and standard deviation calculations for integers and decimals now use additional precision internally. [#14013](https://github.com/cockroachdb/cockroach/pull/14013) -- Decimals with precisions greater than 16 now work correctly. [#14304](https://github.com/cockroachdb/cockroach/pull/14304) -- [`CREATE TABLE AS`](https://www.cockroachlabs.com/docs/v1.0/create-table-as) now works properly with NULL values. [#14247](https://github.com/cockroachdb/cockroach/pull/14247) -- The `SHOW application_name` statement is now supported. [#14085](https://github.com/cockroachdb/cockroach/pull/14085) -- CockroachDB now reports its version in two new places so that clients can distinguish CockroachDB from PostgreSQL: in the `crdb_version` variable in the network handshake, and in a new table `crdb_internal.build_info`. [#14145](https://github.com/cockroachdb/cockroach/pull/14145) -- `ALTER TABLE SPLIT AT` no longer reports an error if the table is already split at the designated point. [#14273](https://github.com/cockroachdb/cockroach/pull/14273) -- Casts involving OID and `regclass` types now behave more consistently with PostgreSQL. The `oid()` function is now available. [#13965](https://github.com/cockroachdb/cockroach/pull/13965) [#14022](https://github.com/cockroachdb/cockroach/pull/14022) [#14115](https://github.com/cockroachdb/cockroach/pull/14115) [#14260](https://github.com/cockroachdb/cockroach/pull/14260) -- Placeholder syntax may now be used for OID values. [#14255](https://github.com/cockroachdb/cockroach/pull/14255) -- Types with modifiers can now be cast to `::regtype`. [#14030](https://github.com/cockroachdb/cockroach/pull/14030) - -
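A brief sketch of the collated-string and NaN additions above; the table, column, and locale are hypothetical:

```sql
-- Collated strings can now be stored in columns.
CREATE TABLE names (
    id   INT PRIMARY KEY,
    name STRING COLLATE de
);
INSERT INTO names VALUES (1, 'Käse' COLLATE de);

-- Detecting Not-a-Number floats with the new function and syntax.
SELECT isnan('NaN'::FLOAT);   -- true
SELECT 'NaN'::FLOAT IS NAN;   -- true
```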

Command-Line Interface Changes

- -- A new command `cockroach gen haproxy` is now available to generate an `haproxy` configuration file that can be used to access the cluster. [#14205](https://github.com/cockroachdb/cockroach/pull/14205) -- The [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) command now supports a `--pid-file` flag, which names a file to which the process ID will be written when the server is ready to handle requests. [#13996](https://github.com/cockroachdb/cockroach/pull/13996) -- If any environment variables used by `cockroach` are set but cannot be parsed, this is now treated as a fatal error. [#14124](https://github.com/cockroachdb/cockroach/pull/14124) - -

Admin UI Changes

- -- The log view now shows logs for the requested node. [#13947](https://github.com/cockroachdb/cockroach/pull/13947) -- The custom time range selector now uses UTC to match the graphs. [#14007](https://github.com/cockroachdb/cockroach/pull/14007) -- The admin UI now uses relative paths for all links and respects cookies, allowing it to be run behind authenticating proxies. [#14039](https://github.com/cockroachdb/cockroach/pull/14039) -- The guideline displayed when hovering over a graph is now positioned correctly. [#14008](https://github.com/cockroachdb/cockroach/pull/14008) - -

Bug Fixes

- -- [`UPSERT`](https://www.cockroachlabs.com/docs/v1.0/upsert) statements no longer incorrectly produce the error "cannot affect row a second time". [#10478](https://github.com/cockroachdb/cockroach/pull/10478) -- The replica allocator now explicitly recognizes when a node's health is unknown, and avoids situations in which a range could become critically under-replicated. [#14206](https://github.com/cockroachdb/cockroach/pull/14206) -- Fixed a problem with transaction conflicts immediately after a split that could cause 100% CPU usage. [#14305](https://github.com/cockroachdb/cockroach/pull/14305) -- The rebalancing system is better able to move replicas to match requested constraints. [#13973](https://github.com/cockroachdb/cockroach/pull/13973) -- Fixed a race that could lead to a panic when replicas are being moved frequently. [#14306](https://github.com/cockroachdb/cockroach/pull/14306) -- [`SHOW TABLES`](https://www.cockroachlabs.com/docs/v1.0/show-tables) no longer fails while a table is being dropped. [#14278](https://github.com/cockroachdb/cockroach/pull/14278) -- Fixed a crash in some [`EXPLAIN`](https://www.cockroachlabs.com/docs/v1.0/explain) statements. [#14244](https://github.com/cockroachdb/cockroach/pull/14244) -- [Name resolution](https://www.cockroachlabs.com/docs/v1.0/sql-name-resolution) no longer fails if non-existent databases are present in the search path. [#14006](https://github.com/cockroachdb/cockroach/pull/14006) -- Logging of "node joined the cluster" events for the UI is more reliable. [#14179](https://github.com/cockroachdb/cockroach/pull/14179) -- Memory accounting on macOS now correctly reports memory allocated by C code. [#14287](https://github.com/cockroachdb/cockroach/pull/14287) - -

Performance Improvements

- -- Range leases are now transferred to the replica closest to the source of load, when possible. This improves performance in geographically-distributed clusters. [#14268](https://github.com/cockroachdb/cockroach/pull/14268) -- The replica allocator will now actively attempt to move replicas away from no-longer-valid placements when the [zone configuration](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) changes. [#14106](https://github.com/cockroachdb/cockroach/pull/14106) -- Improved the performance of write-heavy workloads. [#14138](https://github.com/cockroachdb/cockroach/pull/14138) -- Improved caching of range descriptors. [#14217](https://github.com/cockroachdb/cockroach/pull/14217) -- Reduced overhead in the command queue. [#13982](https://github.com/cockroachdb/cockroach/pull/13982) -- CockroachDB now enables TCP keepalives on its sockets, improving responsiveness to clients that have disappeared (or load balancers with short timeouts). [#14063](https://github.com/cockroachdb/cockroach/pull/14063) -- Index backfill now only reads the columns necessary to perform the backfill. [#14271](https://github.com/cockroachdb/cockroach/pull/14271) - -

Doc Updates

- -- Expanded the tutorial on [orchestrating CockroachDB with Docker Swarm](https://www.cockroachlabs.com/docs/v1.0/orchestrate-cockroachdb-with-docker-swarm) to cover both secure and insecure clusters. [#1184](https://github.com/cockroachdb/docs/pull/1184) -- Added guidance on [importing data from PostgreSQL](https://www.cockroachlabs.com/docs/v1.0/import-data#import-from-postgresql-dump). [#1200](https://github.com/cockroachdb/docs/pull/1200) -- Added OS-specific instructions for [starting a local CockroachDB cluster in Docker](https://www.cockroachlabs.com/docs/v1.0/start-a-local-cluster-in-docker). [#1167](https://github.com/cockroachdb/docs/pull/1167) -- Improved the [install from binary instructions](https://www.cockroachlabs.com/docs/v1.0/install-cockroachdb) to include moving the binary into the `PATH`. [#1196](https://github.com/cockroachdb/docs/pull/1196) - -
- -

Contributors

- -This release includes 156 merged PRs by 23 authors. We would like to thank first-time contributor Jonas from the CockroachDB community. - -
diff --git a/src/current/_includes/releases/v1.0/beta-20170330.md b/src/current/_includes/releases/v1.0/beta-20170330.md deleted file mode 100644 index bc9f251e3ed..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170330.md +++ /dev/null @@ -1,43 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

SQL Language Changes

- -- `DEFAULT NULL` column modifiers no longer produce type errors. [#14371](https://github.com/cockroachdb/cockroach/pull/14371) -- Consecutive write statements using the `RETURNING NOTHING` clause are now executed in parallel (see the sketch after this list). [#14188](https://github.com/cockroachdb/cockroach/pull/14188) -- [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) values now support NaN, Infinity, and negative zero. [#14302](https://github.com/cockroachdb/cockroach/pull/14302) - -
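A minimal sketch of parallel statement execution with `RETURNING NOTHING` (table names are hypothetical):

```sql
BEGIN;
-- Consecutive writes that use RETURNING NOTHING can be executed in
-- parallel within the transaction.
INSERT INTO orders (id, total) VALUES (1, 10.50) RETURNING NOTHING;
INSERT INTO order_items (order_id, sku) VALUES (1, 'SKU-1') RETURNING NOTHING;
COMMIT;
```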

Command-Line Interface Changes

- -- The separate `INFO`, `WARNING`, and `ERROR` log files have been replaced with a single file. [#14347](https://github.com/cockroachdb/cockroach/pull/14347) - -

Admin UI Changes

- -- The node list page now includes information about whether nodes are alive or dead. [#14258](https://github.com/cockroachdb/cockroach/pull/14258) -- Changed the names of some metrics related to lease transfers. [#14319](https://github.com/cockroachdb/cockroach/pull/14319) - -

Bug Fixes

- -- Fixed an inconsistency that could occur with one-phase transactions using the `SNAPSHOT` [isolation level](https://www.cockroachlabs.com/docs/v1.0/transactions#isolation-levels). [#14401](https://github.com/cockroachdb/cockroach/pull/14401) -- Prevented unexpected write intents on node liveness records, which could cause the cluster to become unavailable. [#14346](https://github.com/cockroachdb/cockroach/pull/14346) -- Fixed two "span use after Finish()" errors. [#14227](https://github.com/cockroachdb/cockroach/pull/14227) -- Fixed a rare panic in `raft.step()`. [#14344](https://github.com/cockroachdb/cockroach/pull/14344) - -

Performance Improvements

- -- Reads with timestamps in the past no longer block writes. [#14342](https://github.com/cockroachdb/cockroach/pull/14342) -- Improved node draining to prevent hanging `DROP` statements after a node restart. [#14341](https://github.com/cockroachdb/cockroach/pull/14341) -- Added support for compressing inter-node traffic with snappy. This will be enabled in a future release. [#14239](https://github.com/cockroachdb/cockroach/pull/14239) - -

Doc Updates

- -- Expanded the [Build a Node.js App with CockroachDB](https://www.cockroachlabs.com/docs/v1.0/build-a-nodejs-app-with-cockroachdb-sequelize) tutorial to feature the Sequelize ORM. [#1225](https://github.com/cockroachdb/docs/pull/1225) -- Expanded [Manual Deployment](https://www.cockroachlabs.com/docs/v1.0/manual-deployment) tutorials to cover HAProxy load balancing. [#1213](https://github.com/cockroachdb/docs/pull/1213) -- Expanded [Digital Ocean](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-digital-ocean) and [GCE](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-google-cloud-platform) deployment tutorials to cover cloud platform-managed load balancing. Load balancing guidance for AWS and Azure coming soon. [#1213](https://github.com/cockroachdb/docs/pull/1213) -- Improved guidance on using the `cockroach gen` command to generate CockroachDB resources, including [example SQL data](https://www.cockroachlabs.com/docs/v1.0/generate-cockroachdb-resources#generate-example-data) and an [HAProxy configuration file](https://www.cockroachlabs.com/docs/v1.0/generate-cockroachdb-resources#generate-an-haproxy-configuration-file) for a running cluster. [#1213](https://github.com/cockroachdb/docs/pull/1213) - -

Contributors

- -This release includes 56 merged PRs by 18 authors. diff --git a/src/current/_includes/releases/v1.0/beta-20170406.md b/src/current/_includes/releases/v1.0/beta-20170406.md deleted file mode 100644 index 218b47f316a..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170406.md +++ /dev/null @@ -1,5 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -{{site.data.alerts.callout_danger}}Some significant bugs have been observed when upgrading to this release from prior versions, so it has been withdrawn.{{site.data.alerts.end}} diff --git a/src/current/_includes/releases/v1.0/beta-20170413.md b/src/current/_includes/releases/v1.0/beta-20170413.md deleted file mode 100644 index 5ac4cf8ad65..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170413.md +++ /dev/null @@ -1,105 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Backwards-Incompatible Changes

- -- The implementation of indexing for [collated string columns](https://www.cockroachlabs.com/docs/v1.0/collate) has changed. Any secondary indexes containing collated strings must be [dropped](https://www.cockroachlabs.com/docs/v1.0/drop-index) before upgrading to this version and then [re-created](https://www.cockroachlabs.com/docs/v1.0/create-index) after upgrading. Note that this applies only to secondary indexes, not to primary indexes. [#14528](https://github.com/cockroachdb/cockroach/pull/14528) - -- Any row with a `-0` [`FLOAT`](https://www.cockroachlabs.com/docs/v1.0/float) or [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) value in a primary key or index must be [deleted](https://www.cockroachlabs.com/docs/v1.0/delete) before upgrading to this version and then [re-inserted](https://www.cockroachlabs.com/docs/v1.0/insert) after upgrading. Otherwise, the row containing `-0` will show up during table scans, but it will not be possible to delete it or select it as part of an index scan. - - If you suspect `-0` values are present but do not know where, an alternative approach is to use [`cockroach dump`](https://www.cockroachlabs.com/docs/v1.0/sql-dump) to back up tables with `FLOAT` or `DECIMAL` columns, drop the tables before upgrading, and then use the backup files to recreate the tables and indexes after upgrading. [#14528](https://github.com/cockroachdb/cockroach/pull/14528) - -- The `experimental_strptime` and `experimental_strftime` [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) are deprecated and will be removed in a future release. If you are currently using these functions, please let us know [here](https://forum.cockroachlabs.com/t/are-you-using-experimental-strptime-or-experimental-strftime/545). - -- Several undocumented environment variables have been removed. [#14786](https://github.com/cockroachdb/cockroach/pull/14786) [#14797](https://github.com/cockroachdb/cockroach/pull/14797) -- Clusters which have used the `COCKROACH_PROPOSER_EVALUATED_KV` environment variable require special care when upgrading to this release; contact us for more information. [#14773](https://github.com/cockroachdb/cockroach/pull/14773) - -
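For the collated-string index migration above, the drop and re-create steps might look like the following; the table and index names are hypothetical:

```sql
-- Before upgrading: drop any secondary index on a collated string column.
DROP INDEX names@names_name_idx;

-- After upgrading: re-create the same index.
CREATE INDEX names_name_idx ON names (name);
```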

Build Changes

- -- Building CockroachDB from source no longer uses `$GOPATH/bin` for anything except the final binary produced by `make install`. [#14629](https://github.com/cockroachdb/cockroach/pull/14629) -- Developers should use `make generate` instead of `go generate` to ensure the correct tool versions are used. [#14672](https://github.com/cockroachdb/cockroach/pull/14672) - -

SQL Language Changes

- -- SQL variables can now be reset to their default values with the `RESET` statement or `SET x TO DEFAULT`. [#14105](https://github.com/cockroachdb/cockroach/pull/14105) -- Schema changes within [transactions](https://www.cockroachlabs.com/docs/v1.0/transactions) are now restricted. Certain combinations of schema changes with other statements (which would previously hang) will now return an error. It is recommended that DDL statements be issued outside of transactions, or as the only statement within a transaction. [#14368](https://github.com/cockroachdb/cockroach/pull/14368) -- Subqueries are now supported as the source of `UPDATE` statements. [#14611](https://github.com/cockroachdb/cockroach/pull/14611) -- Timestamp parsing now understands the format used by JDBC's `setTimestamp`. [#14712](https://github.com/cockroachdb/cockroach/pull/14712) -- [`INTERVAL`](https://www.cockroachlabs.com/docs/v1.0/interval) values larger than a month now use the suffix `mon` instead of `m` (which is also used for minutes). Intervals can now be parsed from strings containing decimals and negative numbers. [#14534](https://github.com/cockroachdb/cockroach/pull/14534) -- [`INTERVAL`](https://www.cockroachlabs.com/docs/v1.0/interval) values now support multiplication and division by [`FLOAT`](https://www.cockroachlabs.com/docs/v1.0/float) values. [#14614](https://github.com/cockroachdb/cockroach/pull/14614) -- Arithmetic operations on integers now check for overflow. [#14682](https://github.com/cockroachdb/cockroach/pull/14682) -- Comparisons involving tuples with non-identical types are now allowed whenever the comparison of the individual elements would be allowed. [#14674](https://github.com/cockroachdb/cockroach/pull/14674) -- The implementation of the `experimental_strptime` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) is now consistent across platforms. [#14801](https://github.com/cockroachdb/cockroach/pull/14801) -- `BETWEEN` now supports any combination of types that would be supported by individual comparison operators. [#14754](https://github.com/cockroachdb/cockroach/pull/14754) -- Handling of untyped literals during overload resolution is now more consistent. [#14593](https://github.com/cockroachdb/cockroach/pull/14593) -- [`ROLLBACK TO SAVEPOINT`](https://www.cockroachlabs.com/docs/v1.0/rollback-transaction) is now allowed at any time, not only after receiving a "retry transaction" error. [#14538](https://github.com/cockroachdb/cockroach/pull/14538) -- [`CREATE TABLE AS SELECT`](https://www.cockroachlabs.com/docs/v1.0/create-table-as) now returns the number of rows affected to the SQL client driver. [#13187](https://github.com/cockroachdb/cockroach/pull/13187) -- [`ALTER TABLE .. ADD CONSTRAINT`](https://www.cockroachlabs.com/docs/v1.0/add-constraint) can now be used to add a self-referential foreign key. [#14711](https://github.com/cockroachdb/cockroach/pull/14711) -- The `RETURNING NOTHING` clause can now be used outside of a transaction, although it does not provide any performance benefit in this case. [#14551](https://github.com/cockroachdb/cockroach/pull/14551) -- [SQL user names](https://www.cockroachlabs.com/docs/v1.0/create-user) are now required to start with a letter, contain only letters, numbers, and underscores, and be no longer than 63 characters. 
[#14525](https://github.com/cockroachdb/cockroach/pull/14525) -- The [`BYTES`](https://www.cockroachlabs.com/docs/v1.0/bytes) type name is now reported as `bytea` in `pg_catalog.pg_type` for compatibility with PostgreSQL. [#14495](https://github.com/cockroachdb/cockroach/pull/14495) -- The PostgreSQL type IO functions (like `int8in`, `int8out`, `int8send`, and `int8recv`) have placeholder implementations for compatibility with some ORMs. [#14529](https://github.com/cockroachdb/cockroach/pull/14529) - -
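A short sketch of resetting a session variable to its default, using the two equivalent forms listed above:

```sql
SET application_name = 'myapp';

-- Either form restores the default value.
RESET application_name;
SET application_name TO DEFAULT;
```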

Command-Line Interface Changes

- -- The [`cockroach zone set`](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) command now recognizes the special zone names `.meta`, `.timeseries`, and `.system` to set configurations for system metadata. [#14740](https://github.com/cockroachdb/cockroach/pull/14740) -- The [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) command no longer uses the `COCKROACH_PORT` environment variable; the port must be set on the server's command line (similar to `--host` and other flags). [#14612](https://github.com/cockroachdb/cockroach/pull/14612) -- The [`cockroach quit`](https://www.cockroachlabs.com/docs/v1.0/stop-a-node) command is now more robust. [#14775](https://github.com/cockroachdb/cockroach/pull/14775) -- Command history no longer corrupts previous commands when normalizing them. [#14496](https://github.com/cockroachdb/cockroach/pull/14496) -- Commands entered as a single line are now saved together in the history. [#14846](https://github.com/cockroachdb/cockroach/pull/14846) -- The config file generated by [`cockroach gen haproxy`](https://www.cockroachlabs.com/docs/v1.0/generate-cockroachdb-resources) now enables health checks for the server. [#14624](https://github.com/cockroachdb/cockroach/pull/14624) -- The `--store` and `--log-dir` flags now reject arguments starting with `~` that were not expanded by the shell. [#14746](https://github.com/cockroachdb/cockroach/pull/14746) - -

Admin UI Changes

- -- The **Node List** page has been improved. [#14395](https://github.com/cockroachdb/cockroach/pull/14395) -- A new `node-id` metric with labels for the node's address is now exported for use in external monitoring systems. [#14544](https://github.com/cockroachdb/cockroach/pull/14544) -- The `/debug/` pages are now accessible only from `localhost` by default. Set the environment variable `COCKROACH_REMOTE_DEBUG=any` when running the server to allow remote access. [#14647](https://github.com/cockroachdb/cockroach/pull/14647) [#14675](https://github.com/cockroachdb/cockroach/pull/14675) - -

Performance Improvements

- -- A new, more efficient format is now used for indexes with the [`STORING`](https://www.cockroachlabs.com/docs/v1.0/create-index#store-columns) clause. Existing indexes will still work, but will need to be dropped and recreated to get the benefits of the new format. [#14758](https://github.com/cockroachdb/cockroach/pull/14758) -- A single `TimestampCache` is now shared by all replicas on a store, providing better control over memory usage and better support for large transactions. [#14489](https://github.com/cockroachdb/cockroach/pull/14489) -- Reduced memory used by the `TimestampCache`. [#14516](https://github.com/cockroachdb/cockroach/pull/14516) [#14514](https://github.com/cockroachdb/cockroach/pull/14514) -- Replica garbage collection is now much more efficient. [#14391](https://github.com/cockroachdb/cockroach/pull/14391) -- [Adding](https://www.cockroachlabs.com/docs/v1.0/add-column) and [dropping](https://www.cockroachlabs.com/docs/v1.0/drop-column) columns now uses the distributed SQL engine. [#14331](https://github.com/cockroachdb/cockroach/pull/14331) -- The health of inter-node connections is now monitored using HTTP/2 ping frames instead of regular RPCs. This ensures that HTTP flow control cannot cause a connection to be considered unhealthy. [#14424](https://github.com/cockroachdb/cockroach/pull/14424) -- The limit on the number of concurrent inter-node RPCs has been increased. [#14785](https://github.com/cockroachdb/cockroach/pull/14785) -- The bandwidth used by range snapshots is now limited to 2MB/sec by default to reduce impact on live traffic. [#14718](https://github.com/cockroachdb/cockroach/pull/14718) -- If a single row grows larger than the configured maximum range size, the split queue will no longer spin trying to split the range repeatedly. [#14654](https://github.com/cockroachdb/cockroach/pull/14654) -- Several kinds of tracing and logging have been disabled by default. [#14677](https://github.com/cockroachdb/cockroach/pull/14677) - -
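To pick up the new `STORING` index format described above, an existing index can be dropped and re-created; the names below are hypothetical, and an index left in place simply keeps using the old format:

```sql
DROP INDEX orders@orders_customer_idx;
CREATE INDEX orders_customer_idx ON orders (customer_id) STORING (total);
```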

Bug Fixes

- -- Fixed an issue with [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) and [`DELETE`](https://www.cockroachlabs.com/docs/v1.0/delete) on tables with [`STORING`](https://www.cockroachlabs.com/docs/v1.0/create-index#store-columns) indexes. Indexes affected by this bug will need to be dropped and recreated. [#14643](https://github.com/cockroachdb/cockroach/pull/14643) -- `BETWEEN` now handles `NULL` in the same way as other comparison operators. [#14686](https://github.com/cockroachdb/cockroach/pull/14686) -- Fixed a bug with comparisons involving division with a constant negative divisor. [#14702](https://github.com/cockroachdb/cockroach/pull/14702) -- [`INSERT ON CONFLICT DO UPDATE`](https://www.cockroachlabs.com/docs/v1.0/insert#update-values-on-conflict) no longer uses an error-prone optimization. The [`UPSERT`](https://www.cockroachlabs.com/docs/v1.0/upsert) statement is now faster than the equivalent `INSERT ON CONFLICT DO UPDATE` statement. [#14485](https://github.com/cockroachdb/cockroach/pull/14485) -- Schema changes now abort more reliably when encountering errors. [#14025](https://github.com/cockroachdb/cockroach/pull/14025) -- Joining nodes of two different clusters is now detected more reliably. [#14475](https://github.com/cockroachdb/cockroach/pull/14475) -- Fixed a panic during prepared [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements. [#14481](https://github.com/cockroachdb/cockroach/pull/14481) -- [`FLOAT`](https://www.cockroachlabs.com/docs/v1.0/float) and [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) values in primary keys and indexes now consider `0` and `-0` equal (although the sign will correctly be retained). [#14528](https://github.com/cockroachdb/cockroach/pull/14528) -- [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) values in primary keys and indexes no longer have zeros truncated. If a secondary index contains a `DECIMAL` value with trailing zeros, you can [drop](https://www.cockroachlabs.com/docs/v1.0/drop-index) and [re-create](https://www.cockroachlabs.com/docs/v1.0/create-index) the index to have the value returned correctly when the index is used. [#14528](https://github.com/cockroachdb/cockroach/pull/14528) -- Fixed a crash when assigning with the `DEFAULT` keyword in [`UPDATE`](https://www.cockroachlabs.com/docs/v1.0/update) statements. [#14640](https://github.com/cockroachdb/cockroach/pull/14640) - -
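As a sketch of the `UPSERT` note above, the two statements below have the same effect on a hypothetical table `kv` with primary key `id`; the first is now the faster form:

```sql
UPSERT INTO kv (id, v) VALUES (1, 'b');

INSERT INTO kv (id, v) VALUES (1, 'b')
    ON CONFLICT (id) DO UPDATE SET v = excluded.v;
```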

Doc Updates

- -- Updated the [Start a Local Cluster](https://www.cockroachlabs.com/docs/v1.0/start-a-local-cluster) tutorial to better highlight core CockroachDB capabilities such as automated replications and fault tolerance. [#1257](https://github.com/cockroachdb/docs/pull/1257) -- Expanded the [Build a Ruby App with CockroachDB](https://www.cockroachlabs.com/docs/v1.0/build-a-ruby-app-with-cockroachdb-activerecord) tutorial to feature the ActiveRecord ORM. [#1234](https://github.com/cockroachdb/docs/pull/1234) -- Clarified the `--store` flag in the [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) documentation. [#1243](https://github.com/cockroachdb/docs/pull/1243) -- Updated the [build from source](https://www.cockroachlabs.com/docs/v1.0/install-cockroachdb) instructions to use a source tarball instead of the `cockroach` GitHub repository. The source tarball downloads faster and doesn't need to be extracted in the `GOPATH`. Developers who want to contribute to CockroachDB should use the instructions in [CONTRIBUTING.md](https://github.com/cockroachdb/cockroach/blob/master/CONTRIBUTING.md) instead. [#1209](https://github.com/cockroachdb/docs/issues/1209) -- Added Google Cloud Spanner to the [CockroachDB in Comparison](https://www.cockroachlabs.com/docs/v1.0/cockroachdb-in-comparison) chart. [#1264](https://github.com/cockroachdb/docs/pull/1264) - -
- -

Contributors

- -This release includes 215 merged PRs by 27 authors. We would like to thank the following contributors from the CockroachDB community, especially first-time contributors Amos Bird and Daniel Upton. - -- Amos Bird -- Christian Meunier -- Daniel Upton -- songhao - -
diff --git a/src/current/_includes/releases/v1.0/beta-20170420.md b/src/current/_includes/releases/v1.0/beta-20170420.md deleted file mode 100644 index 8a57a135167..00000000000 --- a/src/current/_includes/releases/v1.0/beta-20170420.md +++ /dev/null @@ -1,69 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Backwards-Incompatible Changes

- -- Security-related command-line arguments have been changed. Instead of using multiple flags, certificates and keys are now loaded from a single [`--certs-dir`](https://www.cockroachlabs.com/docs/v1.0/create-security-certificates#certificate-directory) flag, which defaults to `~/.cockroach-certs` but can be set to a custom location. The individual flags still work in this release but will be removed in the next one. Also note that the `--insecure` flag is now **required** for insecure mode. [#14703](https://github.com/cockroachdb/cockroach/pull/14703) -- The `--alsologtostderr` argument has been renamed to `--logtostderr`, and now takes an optional argument to specify a threshold. [#14841](https://github.com/cockroachdb/cockroach/pull/14841) -- The `^` operator now means `pow()` instead of bitwise xor (bitwise xor uses the `#` operator; this is consistent with PostgreSQL). Any tables with this operator in a [`DEFAULT`](https://www.cockroachlabs.com/docs/v1.0/default-value) or [`CHECK`](https://www.cockroachlabs.com/docs/v1.0/check) expression will need to be updated. [#14882](https://github.com/cockroachdb/cockroach/pull/14882) - -
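The operator change above can be checked directly; `^` is now exponentiation and `#` is bitwise XOR, matching PostgreSQL:

```sql
SELECT 2 ^ 3;  -- 8 (exponentiation)
SELECT 5 # 3;  -- 6 (bitwise XOR)
```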

Upgrade Notes

- -- This release cannot run at the same time as releases prior to [`beta-20170413`](#beta-20170413). If you are running an older version, you must either upgrade to that version first or take down all servers in the cluster before starting any server with this version. This release includes new functionality to make future upgrades smoother. [#14959](https://github.com/cockroachdb/cockroach/pull/14959) - -

Build Changes

- -- CockroachDB's build system has been rewritten to improve portability. It can no longer be installed with `go get` alone; use of the `Makefile` is now required. GNU Make, CMake, and XZ Utils must also be installed to build from source. As a result, CockroachDB can now be built on a larger variety of systems. [#14840](https://github.com/cockroachdb/cockroach/pull/14840) - -

SQL Language Changes

- -- Type errors in [`CHECK`](https://www.cockroachlabs.com/docs/v1.0/check) constraints are now detected earlier. [#14984](https://github.com/cockroachdb/cockroach/pull/14984) -- Aggregation functions now work correctly above the windowing level. [#14729](https://github.com/cockroachdb/cockroach/pull/14729) -- The default precision for inexact decimal operations is now 20 digits (up from 16). [#15001](https://github.com/cockroachdb/cockroach/pull/15001) -- Bounds are now checked when converting [`FLOAT`](https://www.cockroachlabs.com/docs/v1.0/float) or [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) values to [`INTEGER`](https://www.cockroachlabs.com/docs/v1.0/int). [#14967](https://github.com/cockroachdb/cockroach/pull/14967) -- The `pg_catalog` data for `int2vector` is now consistent with PostgreSQL. [#14989](https://github.com/cockroachdb/cockroach/pull/14989) - -

Command-Line Interface Changes

- -- The [`cockroach cert`](https://www.cockroachlabs.com/docs/v1.0/create-security-certificates) commands have been updated to reflect the new `--certs-dir` convention. [#14703](https://github.com/cockroachdb/cockroach/pull/14703) -- New flags `--log-file-max-size` (default 10 MiB) and `--log-dir-max-size` (default 100 MiB) can be used to control log rotation and deletion. The `--log-file-verbosity` flag (default `INFO`) can be used to control the verbosity of the log file. [#14841](https://github.com/cockroachdb/cockroach/pull/14841) -- Critical errors are now written to `stderr` regardless of the logging configuration. [#14926](https://github.com/cockroachdb/cockroach/pull/14926) [#15025](https://github.com/cockroachdb/cockroach/pull/15025) [#15040](https://github.com/cockroachdb/cockroach/pull/15040) -- The server will now reload its certificates and keys on a `SIGHUP`. [#14925](https://github.com/cockroachdb/cockroach/pull/14925) -- Some commands which previously required a `node` certificate now work with the `root` client certificate. [#14972](https://github.com/cockroachdb/cockroach/pull/14972) -- The undocumented `cockroach debug kv` commands have been removed. [#14857](https://github.com/cockroachdb/cockroach/pull/14857) -- The [`cockroach quit`](https://www.cockroachlabs.com/docs/v1.0/stop-a-node) command now has a hard timeout of 1 minute if the cluster has lost quorum. [#14708](https://github.com/cockroachdb/cockroach/pull/14708) - -

Admin UI Changes

- -- Improved alert banners. [#14915](https://github.com/cockroachdb/cockroach/pull/14915) [#15010](https://github.com/cockroachdb/cockroach/pull/15010) - -

Bug Fixes

- -- Transactions no longer get stuck forever when a split occurs while they are waiting for a conflicting transaction. [#14974](https://github.com/cockroachdb/cockroach/pull/14974) -- The Admin UI can now display tables with non-lowercase names. [#14818](https://github.com/cockroachdb/cockroach/pull/14818) -- Fixed a panic with "span and descriptor's range do not overlap". [#14952](https://github.com/cockroachdb/cockroach/pull/14952) -- Fixed a data race in the Prometheus exporter. [#14943](https://github.com/cockroachdb/cockroach/pull/14943) - -

Performance Improvements

- -- The timestamp cache is now much more efficient, leading to a 42% speedup on single-node write-only benchmarks. [#14867](https://github.com/cockroachdb/cockroach/pull/14867) -- Transaction deadlocks are now detected faster. [#14789](https://github.com/cockroachdb/cockroach/pull/14789) -- Fixed a memory "leak" in Raft. [#15041](https://github.com/cockroachdb/cockroach/pull/15041) -- Improved performance of concurrent RPCs on a single GRPC connection. [#14939](https://github.com/cockroachdb/cockroach/pull/14939) -- Raft processing now uses more threads. [#14897](https://github.com/cockroachdb/cockroach/pull/14897) -- Improved performance of schema changes that add indexes. [#14937](https://github.com/cockroachdb/cockroach/pull/14937) -- Reduced the number of cgo calls for RocksDB iterators. [#14920](https://github.com/cockroachdb/cockroach/pull/14920) - -

Doc Updates

- -- Expanded the [`TRUNCATE`](https://www.cockroachlabs.com/docs/v1.0/truncate) documentation to cover using `CASCADE` to truncate dependent tables. [#1297](https://github.com/cockroachdb/docs/pull/1297) -- Minor improvements to the [`ROLLBACK`](https://www.cockroachlabs.com/docs/v1.0/rollback-transaction) documentation. [#1296](https://github.com/cockroachdb/docs/pull/1296) - -
- -

Contributors

- -This release includes 101 merged PRs by 21 authors. We would like to thank first-time contributor xphoniex from the CockroachDB community. - -
diff --git a/src/current/_includes/releases/v1.0/v1.0-rc.1.md b/src/current/_includes/releases/v1.0/v1.0-rc.1.md deleted file mode 100644 index b4ec9c02db3..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0-rc.1.md +++ /dev/null @@ -1,111 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Backwards-Incompatible Changes

- -- It is not possible to upgrade directly to this version of CockroachDB from a version older than [`beta-20170413`](#beta-20170413). Upgrade any older clusters to that version first. [#15228](https://github.com/cockroachdb/cockroach/pull/15228) -- The [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) command no longer recognizes the `COCKROACH_INSECURE` and `COCKROACH_CERTS_DIR` environment variables which are intended to be client-only. Use the `--insecure` and `--certs-dir` command-line flags instead. [#15196](https://github.com/cockroachdb/cockroach/pull/15196) [#15288](https://github.com/cockroachdb/cockroach/pull/15288) -- Older certificate-related command-line flags have been removed; use `--certs-dir` instead. [#15241](https://github.com/cockroachdb/cockroach/pull/15241) -- Various environment variables have been removed in favor of the new `CLUSTER SETTING` commands described below. -- The undocumented `SET SYNTAX` statement and the "modern" syntax variant have been removed. [#15064](https://github.com/cockroachdb/cockroach/pull/15064) - -

General Changes

- -- [Anonymous diagnostics statistics](https://www.cockroachlabs.com/docs/v1.0/diagnostics-reporting) are now reported by default. To opt out, set `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING=true` in the environment when starting the cluster, or use `SET CLUSTER SETTING diagnostics.reporting.enabled = false` in the SQL shell on clusters already running. [#14901](https://github.com/cockroachdb/cockroach/pull/14901) - -
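For a cluster that is already running, opting out is the single statement quoted above:

```sql
-- Disable anonymous diagnostics reporting cluster-wide.
SET CLUSTER SETTING diagnostics.reporting.enabled = false;
```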

Build Changes

- -- CockroachDB now builds on Windows. Windows support is currently experimental and is recommended only for development, not production. -- Building from source on macOS 10.11 works again. [#15168](https://github.com/cockroachdb/cockroach/pull/15168) -- Building from source on older CPUs (without SSE4.2) works again. [#15247](https://github.com/cockroachdb/cockroach/pull/15247) - -

Enterprise Features

- -- Distributed backup and restore capabilities are now available to [enterprise-license customers](https://www.cockroachlabs.com/cockroachdb-community-license/). Using the new [`BACKUP`](https://www.cockroachlabs.com/docs/v1.0/backup) SQL statement, a cluster's schema and data can be backed up in full or incrementally as of a given timestamp, and can be stored on the platforms you're already using, including AWS S3, Google Cloud Storage, NFS, or HTTP storage. In the unlikely event of irreparable cluster failure, the new [`RESTORE`](https://www.cockroachlabs.com/docs/v1.0/restore) SQL statement can be used to restore a cluster from enterprise backups. Importantly, backup and restore operations are distributed across the nodes of a cluster, minimizing the impact on cluster performance. - - For details about pricing, [contact Cockroach Labs](https://www.cockroachlabs.com/get-started-cockroachdb/). - -
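A minimal sketch of a table-level backup and restore; the database, table, and storage URL below are placeholders:

```sql
BACKUP bank.customers TO 'gs://acme-backups/customers-20170510';
RESTORE bank.customers FROM 'gs://acme-backups/customers-20170510';
```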

SQL Language Changes

- -- Table names and other identifiers are now case-insensitive in all cases, even when used in double quotes. These identifiers will be shown in lowercase in many introspection interfaces. This is consistent with PostgreSQL and improves compatibility with the PostgreSQL JDBC driver. [#15434](https://github.com/cockroachdb/cockroach/pull/15434) -- New `SHOW ALL CLUSTER SETTINGS` and `SET CLUSTER SETTING` statements can be used to view and change certain [cluster-wide settings](https://www.cockroachlabs.com/docs/v1.0/cluster-settings). The following cluster settings are available in this release: - - `diagnostics.reporting.enabled` - - `diagnostics.reporting.report_metrics` - - `diagnostics.reporting.send_crash_reports` - - `kv.allocator.lease_rebalancing_aggressiveness` - - `kv.allocator.load_based_lease_rebalancing.enabled` - - `kv.raft.command.max_size` - - `kv.raft_log.synchronize` - - `kv.snapshot_rebalance.max_rate` - - `kv.snapshot_recovery.max_rate` - - `kv.transaction.max_intents` - - `server.remote_debugging.mode` - - `sql.defaults.distsql` - - `sql.metrics.statement_details.dump_to_logs` - - `sql.metrics.statement_details.enabled` - - `sql.metrics.statement_details.threshold` - - `sql.trace.log_statement_execute` - - `sql.trace.session_eventlog.enabled` - - `sql.trace.txn.enable_threshold` -- The `PREPARE` and `EXECUTE` statements can now be used to use prepared statements in a SQL shell. [#15256](https://github.com/cockroachdb/cockroach/pull/15256) -- The `regexp_replace()` [function](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) now handles newlines correctly, both with and without the `n` flag. [#15309](https://github.com/cockroachdb/cockroach/pull/15309) -- Precedence for the binary `~` [operator](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#operators) (regex match) has been changed. This could cause [`DEFAULT`](https://www.cockroachlabs.com/docs/v1.0/default-value) or [`CHECK`](https://www.cockroachlabs.com/docs/v1.0/check) expressions to have different behavior if `~` was used in combination with other operators. These will need to be fixed manually. [#15264](https://github.com/cockroachdb/cockroach/pull/15264) -- Ambiguous results now return the PostgreSQL "statement completion unknown" error code. [#15054](https://github.com/cockroachdb/cockroach/pull/15054) -- Time zones can now be specified in more formats in [`DATE`](https://www.cockroachlabs.com/docs/v1.0/date) types, including `+0700`, `+07`, `+7`, and `+07:00`. [#15177](https://github.com/cockroachdb/cockroach/pull/15177) -- Set-returning [functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators) (like `generate_series()`) are now supported in more contexts. [#14369](https://github.com/cockroachdb/cockroach/pull/14369) - -
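A short sketch of `PREPARE`/`EXECUTE` in a SQL shell, using a hypothetical `kv` table:

```sql
PREPARE add_kv (INT, STRING) AS
    INSERT INTO kv (id, v) VALUES ($1, $2);

EXECUTE add_kv (1, 'hello');
```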

Admin UI Changes

- -- Tooltips are now shown on graphs. [#15282](https://github.com/cockroachdb/cockroach/pull/15282) -- Fixed elements that overlapped incorrectly. [#15387](https://github.com/cockroachdb/cockroach/pull/15387) -- Improved handling of long strings in table cells. [#15387](https://github.com/cockroachdb/cockroach/pull/15387) -- The SQL statement latency graphs now include some overhead that was not previously accounted for. [#15176](https://github.com/cockroachdb/cockroach/pull/15176) - -

Command-Line Interface Changes

- -- The default lifetime of certificates generated by [`cockroach cert`](https://www.cockroachlabs.com/docs/v1.0/create-security-certificates) has been increased to 10 years. [#15306](https://github.com/cockroachdb/cockroach/pull/15306) -- The output format of [`cockroach cert list`](https://www.cockroachlabs.com/docs/v1.0/create-security-certificates) has been improved. [#15198](https://github.com/cockroachdb/cockroach/pull/15198) - -

Bug Fixes

- -- Table scans over [interleaved tables](https://www.cockroachlabs.com/docs/v1.0/interleave-in-parent) now work correctly. [#15043](https://github.com/cockroachdb/cockroach/pull/15043) -- Fixed backslash escaping when used with the Python `psycopg2` driver. [#15281](https://github.com/cockroachdb/cockroach/pull/15281) -- Improved certificate validation, fixing compatibility with client certs as used by `psql`. [#15286](https://github.com/cockroachdb/cockroach/pull/15286) -- Fixed a race between lease transfers and repair/rebalance operations. [#15355](https://github.com/cockroachdb/cockroach/pull/15355) -- Creating a table with a bad `DEFAULT` expression no longer panics. [#15170](https://github.com/cockroachdb/cockroach/pull/15170) -- Improved tracking of memory used by aggregate functions. [#14904](https://github.com/cockroachdb/cockroach/pull/14904) -- Rebalancing now only considers ranges for which all replicas are healthy. [#15312](https://github.com/cockroachdb/cockroach/pull/15312) -- Writes to the raft log are now fully synced to disk. [#15366](https://github.com/cockroachdb/cockroach/pull/15366) -- Fixed a hang during node shutdown. [#15465](https://github.com/cockroachdb/cockroach/pull/15465) -- Fixed "requested entry at index" log spam from `raft_log_queue.go`. [#15466](https://github.com/cockroachdb/cockroach/pull/15466) - -

Performance Improvements

- -- A new distributed SQL execution engine is now used for a large subset of [`SELECT`](https://www.cockroachlabs.com/docs/v1.0/select) statements. Instead of returning all table data involved in the query to the gateway node for final computations, computation is distributed between the gateway node and the nodes where the data resides, with just the relevant results returned to the gateway node. This deeper distribution of work significantly improves performance of queries, especially those using [aggregation functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions) or [joins](https://www.cockroachlabs.com/docs/v1.0/table-expressions#join-expressions). -- Smoothed out request stats used by lease rebalancing and reduced thrashing. [#15156](https://github.com/cockroachdb/cockroach/pull/15156) [#15321](https://github.com/cockroachdb/cockroach/pull/15321) -- Improved retry logic after lease failures. [#15199](https://github.com/cockroachdb/cockroach/pull/15199) -- Snapshots that are necessary for recovery are now given higher priority than those used for rebalancing. [#15279](https://github.com/cockroachdb/cockroach/pull/15279) -- Very large transactions and commands that might cause the server to run out of memory are now rejected earlier. [#15350](https://github.com/cockroachdb/cockroach/pull/15350) -- Node liveness heartbeats are now retried when they fail due to heavy load. [#15362](https://github.com/cockroachdb/cockroach/pull/15362) -- Improved priority adjustments when transactions are pushed. [#7380](https://github.com/cockroachdb/cockroach/pull/7380) - -

Doc Updates

- -- Expanded documentation on [backing up](https://www.cockroachlabs.com/docs/v1.0/back-up-data) and [restoring data](https://www.cockroachlabs.com/docs/v1.0/restore-data) to cover both distributed capabilities covered by our enterprise license as well as non-distributed dump and import functionality. [#1239](https://github.com/cockroachdb/docs/pull/1239) -- Added [SQL-specific FAQs](https://www.cockroachlabs.com/docs/v1.0/sql-faqs). [#1315](https://github.com/cockroachdb/docs/pull/1315) -- Documented the [order in which operators are evaluated](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#operators) within statements. [#1322](https://github.com/cockroachdb/docs/pull/1322) -- Documented the known list of [differences between PostgreSQL and CockroachDB](https://www.cockroachlabs.com/docs/v1.0/porting-postgres) for identical SQL input, with porting instructions. [#1328](https://github.com/cockroachdb/docs/pull/1328) -- Expanded best practices when [using the `--locality` setting](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings) to replicate evenly across datacenters. [#1334](https://github.com/cockroachdb/docs/pull/1334) - -
- -

Contributors

- -This release includes 185 merged PRs by 27 authors. We would like to thank the following contributors from the CockroachDB community, including first-time contributors Evgeniy Vasilev and Mahmoud Al-Qudsi. - -- Evgeniy Vasilev -- Kenji Kaneda -- Mahmoud Al-Qudsi - -
diff --git a/src/current/_includes/releases/v1.0/v1.0-rc.2.md b/src/current/_includes/releases/v1.0/v1.0-rc.2.md deleted file mode 100644 index 5e22c4ad3ff..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0-rc.2.md +++ /dev/null @@ -1,65 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

General Changes

- -- [Diagnostics reporting](https://www.cockroachlabs.com/docs/v1.0/diagnostics-reporting) now includes anonymized query statistics. [#15491](https://github.com/cockroachdb/cockroach/pull/15491) - -

SQL Language Changes

- -- Restrictions on schema changes in transactions with other statements have been relaxed. It is now possible to insert into a table in the same transaction that created it, provided there are no [`FOREIGN KEY`](https://www.cockroachlabs.com/docs/v1.0/foreign-key) or [`INTERLEAVE`](https://www.cockroachlabs.com/docs/v1.0/interleave-in-parent) clauses. [#15511](https://github.com/cockroachdb/cockroach/pull/15511) -- The distributed SQL engine supports queries that internally generate rows with no columns. [#15420](https://github.com/cockroachdb/cockroach/pull/15420) -- New variables added to the [cluster-wide settings](https://www.cockroachlabs.com/docs/v1.0/cluster-settings): `server.declined_reservation_timeout`, `server.failed_reservation_timeout`, and `server.time_until_store_dead`. [#15581](https://github.com/cockroachdb/cockroach/pull/15581) - -
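The relaxed schema-change rule above allows patterns like the following (hypothetical table; note the absence of `FOREIGN KEY` and `INTERLEAVE` clauses):

```sql
BEGIN;
CREATE TABLE t (id INT PRIMARY KEY, s STRING);
INSERT INTO t VALUES (1, 'created and populated in one transaction');
COMMIT;
```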

Command-Line Interface Changes

- -- If the new flag `--listening-url-file` is passed to [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node), the server will write its SQL connection URL to the specified file on successful startup, in addition to printing it to `stdout`. This is particularly helpful in identifying the node's port when an unused port is assigned automatically (`--port=0`). [#15468](https://github.com/cockroachdb/cockroach/pull/15468) -- The [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell no longer buffers all results in memory before printing them. [#15277](https://github.com/cockroachdb/cockroach/pull/15277) -- The log file is now flushed more reliably when a node exits. [#15531](https://github.com/cockroachdb/cockroach/pull/15531) -- The new flag `--max-offset` on the [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) command defines the maximum allowed clock offset for the cluster and replaces the environment variable `COCKROACH_MAX_OFFSET`. Note that the `--max-offset` value must be the same on all nodes in the cluster. In order to change it, every node in the cluster must be stopped and restarted with the new value. [#15568](https://github.com/cockroachdb/cockroach/pull/15568) -- The `--raft-tick-interval` flag has been removed from the [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) command. [#15547](https://github.com/cockroachdb/cockroach/pull/15547) -- Certificate and key loading errors during node startup are now reported to the user via `stdout` as well as being written to the log file. [#15655](https://github.com/cockroachdb/cockroach/pull/15655) - -

Admin UI Changes

- -- The number of transactions that would be affected by a change in isolation level is now tracked. [#15234](https://github.com/cockroachdb/cockroach/pull/15234) -- Improved style of the single node and log pages. [#15486](https://github.com/cockroachdb/cockroach/pull/15486) -- The event log now refreshes automatically. [#15502](https://github.com/cockroachdb/cockroach/pull/15502) -- Graphs on the overview page now have tooltips. [#15624](https://github.com/cockroachdb/cockroach/pull/15624) - -

Bug Fixes

- -- Fixed a panic when a query returning a constant `NULL` uses the new distributed execution engine. [#15530](https://github.com/cockroachdb/cockroach/pull/15530) -- Fixed two issues that could cause requests to get stuck indefinitely. [#15557](https://github.com/cockroachdb/cockroach/pull/15557) [#15573](https://github.com/cockroachdb/cockroach/pull/15573) -- Fixed a rare crash when extending table leases. [#15560](https://github.com/cockroachdb/cockroach/pull/15560) -- Fixed a crash when a transaction that uses [`SAVEPOINT cockroach_restart`](https://www.cockroachlabs.com/docs/v1.0/savepoint) does something other than `ROLLBACK TO SAVEPOINT` after an error. [#15563](https://github.com/cockroachdb/cockroach/pull/15563) -- Fixed [`RESTORE`](https://www.cockroachlabs.com/docs/v1.0/restore) for interleaved tables. [#15564](https://github.com/cockroachdb/cockroach/pull/15564) -- Constant typechecking failures now return a parse error instead of causing a panic. [#15636](https://github.com/cockroachdb/cockroach/pull/15636) -- Prevented persistent distsql and schema change errors after a node dies. [#15368](https://github.com/cockroachdb/cockroach/pull/15368) -- Previously, [`ALTER TABLE ADD COLUMN`](https://www.cockroachlabs.com/docs/v1.0/add-column) with a `DEFAULT unique_rowid()` specification could create many duplicate values. This has been corrected. [#15628](https://github.com/cockroachdb/cockroach/pull/15628) -- Fixed a consistency error in [`BACKUP`](https://www.cockroachlabs.com/docs/v1.0/backup). [#15642](https://github.com/cockroachdb/cockroach/pull/15642) -- Fixed incorrect NULL results when using an aggregation with an empty table. [#15688](https://github.com/cockroachdb/cockroach/pull/15688) - -

Performance Improvements

- -- Reduced thrashing in gossip after a rolling restart. [#15533](https://github.com/cockroachdb/cockroach/pull/15533) -- [`RESTORE`](https://www.cockroachlabs.com/docs/v1.0/restore) performs its pre-splitting less aggressively. [#15548](https://github.com/cockroachdb/cockroach/pull/15548) -- Improved memory accounting in distributed SQL. [#15595](https://github.com/cockroachdb/cockroach/pull/15595), [#15610](https://github.com/cockroachdb/cockroach/pull/15610), [#15623](https://github.com/cockroachdb/cockroach/pull/15623), [#15627](https://github.com/cockroachdb/cockroach/pull/15627) -- Queries using session-dependent variables (e.g., time conversions using the session's timezone) are now executed through the distributed SQL engine. [#15628](https://github.com/cockroachdb/cockroach/pull/15628) - -

Doc Updates

- -- Added an FAQ on different [ways to log SQL queries](https://www.cockroachlabs.com/docs/v1.0/sql-faqs). [#1359](https://github.com/cockroachdb/docs/pull/1359) -- Updated [local deployment tutorials](https://www.cockroachlabs.com/docs/v1.0/start-a-local-cluster) to have nodes listen only on `localhost`. This ensures these tutorials work even on machines whose hostnames aren’t resolvable. [#1358](https://github.com/cockroachdb/docs/pull/1358) - -
- -

Contributors

- -This release includes 85 merged PRs by 24 authors. We would like to thank the following contributors from the CockroachDB community: - -- Dmitry Vorobev -- Richard Artoul - -
diff --git a/src/current/_includes/releases/v1.0/v1.0.1.md b/src/current/_includes/releases/v1.0/v1.0.1.md deleted file mode 100644 index a077fd8a3e3..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0.1.md +++ /dev/null @@ -1,39 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Bug Fixes

- -- The command-line interface no longer prompts for a password twice when [password authentication](https://www.cockroachlabs.com/docs/v1.0/create-and-manage-users#secure-clusters-with-passwords) is used. [#15919](https://github.com/cockroachdb/cockroach/pull/15919) -- [`CREATE USER WITH PASSWORD`](https://www.cockroachlabs.com/docs/v1.0/create-user) now works correctly in the [`cockroach sql`](https://www.cockroachlabs.com/docs/v1.0/use-the-built-in-sql-client) shell. [#15911](https://github.com/cockroachdb/cockroach/pull/15911) -- Fixed bugs in the `MIN()` and `MAX()` [aggregate functions](https://www.cockroachlabs.com/docs/v1.0/functions-and-operators#aggregate-functions) in the presence of `NULL` values. [#15912](https://github.com/cockroachdb/cockroach/pull/15912) -- Administrator privileges are no longer required on Windows. [#15916](https://github.com/cockroachdb/cockroach/pull/15916) -- Favicons now display correctly in the web UI. [#15918](https://github.com/cockroachdb/cockroach/pull/15918) -- A clear error message is now printed when running on Windows versions older than 8 (the oldest version we support). [#15920](https://github.com/cockroachdb/cockroach/pull/15920) -- The `--cache` and `--max-sql-memory` flags of [`cockroach start`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) now use their default values on OpenBSD. [#15914](https://github.com/cockroachdb/cockroach/pull/15914) -- Attempting to [create a view](https://www.cockroachlabs.com/docs/v1.0/create-view) involving an array now returns an error instead of crashing the server. [#15913](https://github.com/cockroachdb/cockroach/pull/15913) -- [`cockroach start --insecure`](https://www.cockroachlabs.com/docs/v1.0/start-a-node) now prints a warning about the consequences of insecure mode. [#15924](https://github.com/cockroachdb/cockroach/pull/15924) -- The `round()` function for floats now works on `arm64` platforms. In some instances this changes the low bits of the result on other platforms. [#15915](https://github.com/cockroachdb/cockroach/pull/15915) -- [Collation](https://www.cockroachlabs.com/docs/v1.0/collate) locales containing capital letters are now parsed correctly. [#15917](https://github.com/cockroachdb/cockroach/pull/15917) -- Fixed a panic in node ID allocation. [#15937](https://github.com/cockroachdb/cockroach/pull/15937) -- Other errors in node/store ID allocation are now retried automatically. [#15973](https://github.com/cockroachdb/cockroach/pull/15973) -- [Dropping a database](https://www.cockroachlabs.com/docs/v1.0/drop-database) containing views now works correctly. [#15983](https://github.com/cockroachdb/cockroach/pull/15983) -- Fully-qualified column names referring to [views](https://www.cockroachlabs.com/docs/v1.0/views) now work correctly. [#15984](https://github.com/cockroachdb/cockroach/pull/15984) -- Fixed a panic when preparing a query with window functions. [#16019](https://github.com/cockroachdb/cockroach/pull/16019) -- Fixed a panic when null arguments are given to the `generate_series()` or `unnest()` function. [#16017](https://github.com/cockroachdb/cockroach/pull/16017) - -

Admin UI Changes

- -- Fixed the display of timestamps in logs. [#16002](https://github.com/cockroachdb/cockroach/pull/16002) - -

Performance Improvements

- -- Disabled RocksDB assertions in release builds. [#15943](https://github.com/cockroachdb/cockroach/pull/15943) [#15960](https://github.com/cockroachdb/cockroach/pull/15960) -- Index joins for queries with limits no longer fetch unnecessary rows. [#15969](https://github.com/cockroachdb/cockroach/pull/15969) -- Fixed an issue causing high CPU usage after data is deleted or overwritten. [#16006](https://github.com/cockroachdb/cockroach/pull/16006) - -

Doc Updates

- -- Contributing to the docs is now easier. The new **Contribute** options at the top of every page of the docs let you immediately start a PR with page edits or open a GitHub issue to report issues on the page or to request a new topic. [#1440](https://github.com/cockroachdb/docs/pull/1440) -- Added troubleshooting guidance on [common errors](https://www.cockroachlabs.com/docs/v1.0/common-errors), [cluster and node setup](https://www.cockroachlabs.com/docs/v1.0/cluster-setup-troubleshooting), [SQL query behavior](https://www.cockroachlabs.com/docs/v1.0/query-behavior-troubleshooting), [using debug and error logs](https://www.cockroachlabs.com/docs/v1.0/debug-and-error-logs), [finding support resources](https://www.cockroachlabs.com/docs/v1.0/support-resources), and [filing issues](https://www.cockroachlabs.com/docs/v1.0/file-an-issue). [#1370](https://github.com/cockroachdb/docs/pull/1370) -- Added [FAQs](https://www.cockroachlabs.com/docs/v1.0/operational-faqs) on why memory and disk usage increases steadily in new clusters. [#1450](https://github.com/cockroachdb/docs/pull/1450) diff --git a/src/current/_includes/releases/v1.0/v1.0.2.md b/src/current/_includes/releases/v1.0/v1.0.2.md deleted file mode 100644 index aa8b88f7d1c..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0.2.md +++ /dev/null @@ -1,45 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

General Changes

- -- CockroachDB now uses Go 1.8.3 (fixing a security issue when elliptic curve certificates are used, as well as a panic). [#16330](https://github.com/cockroachdb/cockroach/pull/16330) - -

SQL Language Changes

- -- The `BEGIN` statement now accepts the `READ WRITE` modifier for compatibility with the Go `lib/pq` driver. [#16348](https://github.com/cockroachdb/cockroach/pull/16348) - -
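A minimal sketch of the syntax this change accepts; the statements inside the transaction and the `accounts` table (taken from the app samples removed later in this patch) are illustrative:

~~~ sql
-- BEGIN now tolerates the READ WRITE modifier sent by lib/pq.
BEGIN READ WRITE;
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
COMMIT;
~~~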

Command-Line Interface Changes

- -- More node configuration details get logged on node initialization. [#16209](https://github.com/cockroachdb/cockroach/pull/16209) - -

Bug Fixes

- -- Fixed several bugs with the [`OFFSET`](https://www.cockroachlabs.com/docs/v1.0/select#paginate-through-limited-results) clause, including incorrect results and a panic when it is used without a `LIMIT`. [#16315](https://github.com/cockroachdb/cockroach/pull/16315) -- Fixed the behavior of `LIMIT` in index joins. [#16340](https://github.com/cockroachdb/cockroach/pull/16340) -- Fixed the behavior of [`DISTINCT`](https://www.cockroachlabs.com/docs/v1.0/select#select-distinct-rows) with omitted columns. [#16318](https://github.com/cockroachdb/cockroach/pull/16318) -- Fixed decoding of very large [`DECIMAL`](https://www.cockroachlabs.com/docs/v1.0/decimal) values. [#16284](https://github.com/cockroachdb/cockroach/pull/16284) -- [`ALTER TABLE...ADD FOREIGN KEY`](https://www.cockroachlabs.com/docs/v1.0/add-constraint#add-the-foreign-key-constraint) no longer attempts to create an index on the source table. [#16333](https://github.com/cockroachdb/cockroach/pull/16333) -- `INT2VECTOR` is now zero-indexed. [#16326](https://github.com/cockroachdb/cockroach/pull/16326) -- Timestamp parsing is now consistent between prepared and non-prepared statements. [#16327](https://github.com/cockroachdb/cockroach/pull/16327) -- If a prepared statement is invalidated by a schema change, it now returns an error instead of an incorrect result. [#16335](https://github.com/cockroachdb/cockroach/pull/16335) -- The replica allocator will no longer remove replicas from a down node if there are no live nodes to receive them. [#16218](https://github.com/cockroachdb/cockroach/pull/16218) -- `ConditionalPut` failures during initial migrations are now handled gracefully. [#16244](https://github.com/cockroachdb/cockroach/pull/16244) -- `EXPLAIN (TRACE)` and the `sql.trace.txn.enable_threshold` cluster setting now work even when the `trace.debug.enable` [cluster setting](https://www.cockroachlabs.com/docs/v1.0/cluster-settings) is false (the default). [#16217](https://github.com/cockroachdb/cockroach/pull/16217) -- Improved the ability of a cluster to restart with some nodes missing. [#16353](https://github.com/cockroachdb/cockroach/pull/16353) -- [Backup URLs](https://www.cockroachlabs.com/docs/v1.0/backup#backup-file-urls) are now sanitized before being written to the system jobs table. [#16386](https://github.com/cockroachdb/cockroach/pull/16386) -- [`cockroach zone set`](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones) with an incomplete config for the special system ranges now merges the given config with the default instead of using zeros. [#16212](https://github.com/cockroachdb/cockroach/pull/16212) - -
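For context on the `OFFSET` fix at the top of this list, a query of the following shape, with `OFFSET` but no `LIMIT`, is the case that previously misbehaved; the table and values are illustrative:

~~~ sql
-- OFFSET without an accompanying LIMIT.
SELECT id, balance FROM accounts ORDER BY id OFFSET 5;
~~~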

Performance Improvements

- -- Improved performance of MVCC garbage collection. [#16219](https://github.com/cockroachdb/cockroach/pull/16219) [#16231](https://github.com/cockroachdb/cockroach/pull/16231) -- Adjusted timeouts for snapshot throttling. [#16328](https://github.com/cockroachdb/cockroach/pull/16328) - -

Doc Updates

- -- Added [basic hardware recommendations](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings#hardware) for a CockroachDB cluster. [#1527](https://github.com/cockroachdb/docs/pull/1527) -- Completed the [`ADD COLUMN`](https://www.cockroachlabs.com/docs/v1.0/add-column) documentation. [#1483](https://github.com/cockroachdb/docs/pull/1483) -- Improved the [`RENAME TABLE`](https://www.cockroachlabs.com/docs/v1.0/rename-table) documentation. [#1532](https://github.com/cockroachdb/docs/pull/1532) -- Improved the [`DROP DATABASE`](https://www.cockroachlabs.com/docs/v1.0/drop-database) documentation. [#1540](https://github.com/cockroachdb/docs/pull/1540) -- Added examples for [creating a replication zone for system ranges](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones#create-a-replication-zone-for-system-ranges) and [tweaking the replication of system ranges](https://www.cockroachlabs.com/docs/v1.0/configure-replication-zones#tweaking-the-replication-of-system-ranges). [#1441](https://github.com/cockroachdb/docs/pull/1441) diff --git a/src/current/_includes/releases/v1.0/v1.0.3.md b/src/current/_includes/releases/v1.0/v1.0.3.md deleted file mode 100644 index bff44f9d1c5..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0.3.md +++ /dev/null @@ -1,33 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

SQL Language Changes

- -- `SET client_encoding='unicode'` is now recognized as equivalent to `UTF-8`. [#16513](https://github.com/cockroachdb/cockroach/pull/16513) -- The `IN` operator now works with tuples of `OIDs`. [#16645](https://github.com/cockroachdb/cockroach/pull/16645) - -
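A minimal sketch of the first change above; the setting name comes from the release note, and the value is the newly accepted spelling:

~~~ sql
-- 'unicode' is now accepted as an alias for 'UTF-8'.
SET client_encoding = 'unicode';
~~~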

Bug Fixes

- -- Fixed a consistency problem that could result when a transaction commit races with a lease transfer. [#16775](https://github.com/cockroachdb/cockroach/pull/16775) -- Fixed a panic with message `indexed var linked to different container`. [#16444](https://github.com/cockroachdb/cockroach/pull/16444) -- Casting a string to `regtype` now uses the full type parser. [#16480](https://github.com/cockroachdb/cockroach/pull/16480) [#16792](https://github.com/cockroachdb/cockroach/pull/16792) -- Fixed formatting of some error messages containing `%` characters. [#16617](https://github.com/cockroachdb/cockroach/pull/16617) -- Fixed a panic in the timestamp cache. [#16711](https://github.com/cockroachdb/cockroach/pull/16711) -- Fixed a memory leak when [`EXPLAIN`](https://www.cockroachlabs.com/docs/v1.0/explain) is used on a query containing [`UNION`](https://www.cockroachlabs.com/docs/v1.0/select#union-combine-two-queries). [#16775](https://github.com/cockroachdb/cockroach/pull/16775) - -

Performance Improvements

- -- The intent resolver now works in smaller batches, reducing memory spikes after large transactions. [#16741](https://github.com/cockroachdb/cockroach/pull/16741) -- MVCC garbage collection now works in smaller batches, avoiding problems with refreshing leases. [#16735](https://github.com/cockroachdb/cockroach/pull/16735) - -

Doc Updates

- -- Introduced docs for specific versions of CockroachDB. [#1605](https://github.com/cockroachdb/docs/pull/1605) - - Documentation for the latest [production release]({% link releases/index.md %}#production-releases) is at https://www.cockroachlabs.com/docs/stable. - - Documentation for the latest [testing release]({% link releases/index.md %}#testing-releases) is at https://www.cockroachlabs.com/docs/dev. - - On any page, you can switch between versions using the version selector in the lower-left. -- Updated the [Deploy CockroachDB on Google Cloud Platform](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-google-cloud-platform) tutorial to provide guidance on using Google's managed TCP Proxy Load Balancing service. [#1665](https://github.com/cockroachdb/docs/pull/1665) -- Added an [Automatic Cloud Migration](https://www.cockroachlabs.com/docs/v1.0/demo-automatic-cloud-migration) tutorial that shows you how to use a local cluster to simulate deploying across clouds and migrating from one cloud to another with no downtime. [#1610](https://github.com/cockroachdb/docs/pull/1610) -- Expanded details on [how CockroachDB handles unsynchronized clocks](https://www.cockroachlabs.com/docs/v1.0/recommended-production-settings#clock-synchronization). [#1636](https://github.com/cockroachdb/docs/pull/1636) -- Updated the [Node.js transaction code sample](https://www.cockroachlabs.com/docs/v1.0/build-a-nodejs-app-with-cockroachdb) to perform a `SELECT` as part of the transaction and return the results through the transaction wrapper. [#1615](https://github.com/cockroachdb/docs/pull/1615) diff --git a/src/current/_includes/releases/v1.0/v1.0.4.md b/src/current/_includes/releases/v1.0/v1.0.4.md deleted file mode 100644 index 2b6917ea681..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0.4.md +++ /dev/null @@ -1,21 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

SQL Language Changes

- -- [`ROLLBACK TO SAVEPOINT`](https://www.cockroachlabs.com/docs/v1.0/rollback-transaction) is now accepted at any time. [#16932](https://github.com/cockroachdb/cockroach/pull/16932) - -
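For reference, this statement belongs to the client-side retry protocol that the deleted app samples (for example `TxnSample.java`, removed later in this patch) implement. A minimal sketch, using the `cockroach_restart` savepoint name from those samples and an illustrative `accounts` update:

~~~ sql
BEGIN;
SAVEPOINT cockroach_restart;
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
-- On a retryable error, this is now accepted at any point in the transaction:
ROLLBACK TO SAVEPOINT cockroach_restart;
-- ...re-issue the statements, then release the savepoint and commit:
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
RELEASE SAVEPOINT cockroach_restart;
COMMIT;
~~~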

Bug Fixes

- -- Fixed a race condition that could lead to serializability violations when requests race with a lease transfer. [#17141](https://github.com/cockroachdb/cockroach/pull/17141) -- [`CREATE TABLE AS SELECT`](https://www.cockroachlabs.com/docs/v1.0/create-table-as) now handles values of type `BOOL`. [#17092](https://github.com/cockroachdb/cockroach/pull/17092) -- Fixed handling of column names containing special characters in [`SHOW CREATE TABLE`](https://www.cockroachlabs.com/docs/v1.0/show-create-table), [`CREATE VIEW`](https://www.cockroachlabs.com/docs/v1.0/create-view), and [`CREATE TABLE AS SELECT`](https://www.cockroachlabs.com/docs/v1.0/create-table-as). [#16783](https://github.com/cockroachdb/cockroach/pull/16783) -- Fixed issues with [diagnostic reporting](https://www.cockroachlabs.com/docs/v1.1/diagnostics-reporting). [#17085](https://github.com/cockroachdb/cockroach/pull/17085) -- Fixed a panic with `--vmodule=plan=3` or `--verbosity=3`. [#17093](https://github.com/cockroachdb/cockroach/pull/17093) - -

Doc Updates

- -- Improved code samples for [building a Rust app with CockroachDB](https://www.cockroachlabs.com/docs/v1.0/build-a-rust-app-with-cockroachdb). [#1746](https://github.com/cockroachdb/docs/pull/1746) -- Improved documentation for secure [Docker Swarm deployments](https://www.cockroachlabs.com/docs/v1.0/orchestrate-cockroachdb-with-docker-swarm). [#1735](https://github.com/cockroachdb/docs/pull/1735) -- Fixed links to Google Cloud Platform documentation in [deployment docs](https://www.cockroachlabs.com/docs/v1.0/deploy-cockroachdb-on-google-cloud-platform). [#1744](https://github.com/cockroachdb/docs/pull/1744) diff --git a/src/current/_includes/releases/v1.0/v1.0.5.md b/src/current/_includes/releases/v1.0/v1.0.5.md deleted file mode 100644 index 319a3ac4716..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0.5.md +++ /dev/null @@ -1,24 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Command-Line Interface Changes

- -- Fatal error messages are now printed to both `stderr` and the [logs](https://www.cockroachlabs.com/docs/v1.0/debug-and-error-logs). [#17675](https://github.com/cockroachdb/cockroach/pull/17675) - -

Bug Fixes

- -- [Foreign keys](https://www.cockroachlabs.com/docs/v1.0/foreign-key) that point to a prefix of an existing index now work correctly. Previously, rows could be deleted from the referenced table without recognizing the constraint violation if rows in the referring table pointed to them. These foreign keys are also now shown correctly in [`SHOW CREATE TABLE`](https://www.cockroachlabs.com/docs/v1.0/show-create-table). [#17653](https://github.com/cockroachdb/cockroach/pull/17653) -- The crash reporter will now time out instead of waiting indefinitely trying to reach the server. [#17689](https://github.com/cockroachdb/cockroach/pull/17689) -- `AS OF SYSTEM TIME 0` is now a normal error instead of a panic. [#17681](https://github.com/cockroachdb/cockroach/pull/17681) -- Fixed a panic caused by unclosed `planNodes`. [#17281](https://github.com/cockroachdb/cockroach/pull/17281) -- 3DES has been removed from the list of supported TLS cipher suites (it would never be selected because we require TLS 1.2, but it could be reported as a false positive by security scanners). [#17237](https://github.com/cockroachdb/cockroach/pull/17237) - -
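For contrast with the `AS OF SYSTEM TIME 0` error noted above, a well-formed historical read takes a concrete past timestamp; this is only a sketch, and the table and timestamp value are illustrative:

~~~ sql
-- A zero timestamp is rejected with a normal error; a concrete
-- timestamp in the past is the supported form.
SELECT id, balance FROM accounts AS OF SYSTEM TIME '2017-08-30 10:00:00';
~~~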

Performance Improvements

- -- Reduced unhelpful retries of slow commands. [#17385](https://github.com/cockroachdb/cockroach/pull/17385) - -

Doc Updates

- -- Added an [FAQ](https://www.cockroachlabs.com/docs/v1.0/frequently-asked-questions#why-does-cockroachdb-use-the-postgresql-wire-protocol-instead-of-the-mysql-protocol) on why CockroachDB uses the PostgreSQL wire protocol instead of the MySQL protocol. [#1845](https://github.com/cockroachdb/docs/pull/1845) -- Added a [known limitation](https://www.cockroachlabs.com/docs/v1.0/known-limitations#dropping-an-index-interleaved-into-another-index-on-the-same-table) around dropping an index interleaved into another index on the same table. [#1859](https://github.com/cockroachdb/docs/pull/1859) diff --git a/src/current/_includes/releases/v1.0/v1.0.6.md b/src/current/_includes/releases/v1.0/v1.0.6.md deleted file mode 100644 index 6b4199fcb39..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0.6.md +++ /dev/null @@ -1,19 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Bug Fixes

- -- Fixed a bug in `ReverseScan` that could cause data to be missing from query results. [#17891](https://github.com/cockroachdb/cockroach/pull/17891) [#18385](https://github.com/cockroachdb/cockroach/pull/18385) -- Fixed a potential data corruption bug in RocksDB. [#18397](https://github.com/cockroachdb/cockroach/pull/18397) -- Fixed a panic in some `UNION` queries. [#17933](https://github.com/cockroachdb/cockroach/pull/17933) -- It is no longer possible to set the `kv.raft.command.max_size` cluster setting to a very low value (which could make the cluster inoperable). [#18016](https://github.com/cockroachdb/cockroach/pull/18016) -- Fixed a range lease bug that could make ranges unavailable. [#18218](https://github.com/cockroachdb/cockroach/pull/18218) -- Parallel statement execution is now disabled by default due to issues in handling retryable errors. [#18002](https://github.com/cockroachdb/cockroach/pull/18002) [#18332](https://github.com/cockroachdb/cockroach/pull/18332) -- Fixed a resource leak in diagnostics reporting. [#18315](https://github.com/cockroachdb/cockroach/pull/18315) -- Host names are no longer included in diagnostics reports. [#18335](https://github.com/cockroachdb/cockroach/pull/18335) -- Increased logging to help diagnose certain issues. [#18229](https://github.com/cockroachdb/cockroach/pull/18229) [#18272](https://github.com/cockroachdb/cockroach/pull/18272) [#18282](https://github.com/cockroachdb/cockroach/pull/18282) [#18333](https://github.com/cockroachdb/cockroach/pull/18333) - -

Doc Updates

- -- Updated the [Contribute to CockroachDB](https://wiki.crdb.io/wiki/spaces/CRDB/pages/73204033/Contributing+to+CockroachDB) page to provide guidelines on finding a project, determining its complexity, and learning what to expect in your collaboration with the Cockroach Labs team. [#1881](https://github.com/cockroachdb/docs/pull/1881) diff --git a/src/current/_includes/releases/v1.0/v1.0.7.md b/src/current/_includes/releases/v1.0/v1.0.7.md deleted file mode 100644 index 9a23d800926..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0.7.md +++ /dev/null @@ -1,8 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

Bug Fixes

- -- Fixed a leak of table leases that could grow to a point that a cluster becomes unusable. [#22563](https://github.com/cockroachdb/cockroach/pull/22563) -- When the log disk fills up, the process now crashes instead of hanging. [#22563](https://github.com/cockroachdb/cockroach/pull/22563) diff --git a/src/current/_includes/releases/v1.0/v1.0.md b/src/current/_includes/releases/v1.0/v1.0.md deleted file mode 100644 index f16628eee93..00000000000 --- a/src/current/_includes/releases/v1.0/v1.0.md +++ /dev/null @@ -1,20 +0,0 @@ -

{{ include.release }}

- -Release Date: {{ include.release_date | date: "%B %-d, %Y" }} - -

General Availability

- -With this release, CockroachDB graduates from beta to a production-ready database built for the cloud and designed to power business at any scale from startup to enterprise. Years of development have gone into the capabilities included, from **distributed SQL** workloads with guaranteed ACID semantics, to **multi-active availability** with consistent consensus-based replication, to **simple and flexible deployment** and operations, to enterprise-scale **distributed, incremental backup and restore**. - -Read more about these critical product areas in [this blog post](https://www.cockroachlabs.com/blog/cockroachdb-1-0-release/), and then get started: - -- Install CockroachDB using the download links above -- [Start a local cluster](https://www.cockroachlabs.com/docs/v1.0/demo-data-replication) and explore how CockroachDB automatically replicates and distributes data as you scale -- [Build an app](https://www.cockroachlabs.com/docs/v1.0/build-an-app-with-cockroachdb) on CockroachDB using a PostgreSQL-compatible driver or ORM -- Learn how to deploy a production cluster [manually](https://www.cockroachlabs.com/docs/v1.0/manual-deployment) or in the [cloud](https://www.cockroachlabs.com/docs/v1.0/cloud-deployment) - -You might also be interested in reviewing previous beta release notes for insight into the broad range of development efforts that made CockroachDB 1.0 possible. - -

Known Limitations

- -For information about limitations we've identified in CockroachDB 1.0, with suggested workarounds where applicable, see [Known Limitations](https://www.cockroachlabs.com/docs/v1.0/known-limitations). diff --git a/src/current/_includes/sidebar-data-v1.0.json b/src/current/_includes/sidebar-data-v1.0.json deleted file mode 100644 index c355d483ae2..00000000000 --- a/src/current/_includes/sidebar-data-v1.0.json +++ /dev/null @@ -1,1102 +0,0 @@ -[ - { - "title": "Docs Home", - "is_top_level": true, - "urls": [ - "/" - ] - }, - { - "title": "Quickstart", - "is_top_level": true, - "urls": [ - "/cockroachcloud/quickstart.html" - ] - }, - {% include sidebar-data-cockroachcloud.json %}, - { - "title": "CockroachDB", - "is_top_level": true, - "items": [ - { - "title": "Get Started", - "items": [ - { - "title": "Install CockroachDB", - "urls": [ - "/${VERSION}/install-cockroachdb.html" - ] - }, - { - "title": "Start a Local Cluster", - "items": [ - { - "title": "From Binary", - "urls": [ - "/${VERSION}/start-a-local-cluster.html", - "/${VERSION}/secure-a-cluster.html" - ] - }, - { - "title": "In Docker", - "urls": [ - "/${VERSION}/start-a-local-cluster-in-docker.html" - ] - } - ] - }, - { - "title": "Learn CockroachDB SQL", - "items": [ - { - "title": "Essential SQL Statements", - "urls": [ - "/${VERSION}/learn-cockroachdb-sql.html" - ] - }, - { - "title": "Use the Built-in SQL Client", - "urls": [ - "/${VERSION}/use-the-built-in-sql-client.html" - ] - } - ] - }, - { - "title": "Build an App", - "items": [ - { - "title": "Overview", - "urls": [ - "/${VERSION}/build-an-app-with-cockroachdb.html" - ] - }, - { - "title": "Go", - "urls": [ - "/${VERSION}/build-a-go-app-with-cockroachdb.html", - "/${VERSION}/build-a-go-app-with-cockroachdb-gorm.html" - ] - }, - { - "title": "Python", - "urls": [ - "/${VERSION}/build-a-python-app-with-cockroachdb.html", - "/${VERSION}/build-a-python-app-with-cockroachdb-sqlalchemy.html" - ] - }, - { - "title": "Ruby", - "urls": [ - "/${VERSION}/build-a-ruby-app-with-cockroachdb.html", - "/${VERSION}/build-a-ruby-app-with-cockroachdb-activerecord.html" - ] - }, - { - "title": "Java", - "urls": [ - "/${VERSION}/build-a-java-app-with-cockroachdb.html", - "/${VERSION}/build-a-java-app-with-cockroachdb-hibernate.html" - ] - }, - { - "title": "Node.js", - "urls": [ - "/${VERSION}/build-a-nodejs-app-with-cockroachdb.html", - "/${VERSION}/build-a-nodejs-app-with-cockroachdb-sequelize.html" - ] - }, - { - "title": "C++", - "urls": [ - "/${VERSION}/build-a-c++-app-with-cockroachdb.html" - ] - }, - { - "title": "Clojure", - "urls": [ - "/${VERSION}/build-a-clojure-app-with-cockroachdb.html" - ] - }, - { - "title": "PHP", - "urls": [ - "/${VERSION}/build-a-php-app-with-cockroachdb.html" - ] - }, - { - "title": "Rust", - "urls": [ - "/${VERSION}/build-a-rust-app-with-cockroachdb.html" - ] - } - ] - }, - { - "title": "Explore Core Features", - "items": [ - { - "title": "Multi-Active Availability", - "urls": [ - "/${VERSION}/multi-active-availability.html" - ] - }, - { - "title": "Data Replication", - "urls": [ - "/${VERSION}/demo-data-replication.html" - ] - }, - { - "title": "Fault Tolerance & Recovery", - "urls": [ - "/${VERSION}/demo-fault-tolerance-and-recovery.html" - ] - }, - { - "title": "Automatic Rebalancing", - "urls": [ - "/${VERSION}/demo-automatic-rebalancing.html" - ] - }, - { - "title": "Cross-Cloud Migration", - "urls": [ - "/${VERSION}/demo-automatic-cloud-migration.html" - ] - } - ] - } - ] - }, - { - "title": "Develop", - "items": [ - { - "title": "Install Client 
Drivers", - "urls": [ - "/${VERSION}/install-client-drivers.html" - ] - }, - { - "title": "SQL Feature Support", - "urls": [ - "/${VERSION}/sql-feature-support.html" - ] - }, - { - "title": "SQL Statements", - "items": [ - { - "title": "Overview", - "urls": [ - "/${VERSION}/sql-statements.html" - ] - }, - { - "title": "ADD COLUMN", - "urls": [ - "/${VERSION}/add-column.html" - ] - }, - { - "title": "ADD CONSTRAINT", - "urls": [ - "/${VERSION}/add-constraint.html" - ] - }, - { - "title": "ALTER COLUMN", - "urls": [ - "/${VERSION}/alter-column.html" - ] - }, - { - "title": "ALTER TABLE", - "urls": [ - "/${VERSION}/alter-table.html" - ] - }, - { - "title": "ALTER VIEW", - "urls": [ - "/${VERSION}/alter-view.html" - ] - }, - { - "title": "BACKUP (Enterprise)", - "urls": [ - "/${VERSION}/backup.html" - ] - }, - { - "title": "BEGIN", - "urls": [ - "/${VERSION}/begin-transaction.html" - ] - }, - { - "title": "COMMIT", - "urls": [ - "/${VERSION}/commit-transaction.html" - ] - }, - { - "title": "CREATE DATABASE", - "urls": [ - "/${VERSION}/create-database.html" - ] - }, - { - "title": "CREATE INDEX", - "urls": [ - "/${VERSION}/create-index.html" - ] - }, - { - "title": "CREATE TABLE", - "urls": [ - "/${VERSION}/create-table.html" - ] - }, - { - "title": "CREATE TABLE AS", - "urls": [ - "/${VERSION}/create-table-as.html" - ] - }, - { - "title": "CREATE USER", - "urls": [ - "/${VERSION}/create-user.html" - ] - }, - { - "title": "CREATE VIEW", - "urls": [ - "/${VERSION}/create-view.html" - ] - }, - { - "title": "DELETE", - "urls": [ - "/${VERSION}/delete.html" - ] - }, - { - "title": "DROP COLUMN", - "urls": [ - "/${VERSION}/drop-column.html" - ] - }, - { - "title": "DROP CONSTRAINT", - "urls": [ - "/${VERSION}/drop-constraint.html" - ] - }, - { - "title": "DROP DATABASE", - "urls": [ - "/${VERSION}/drop-database.html" - ] - }, - { - "title": "DROP INDEX", - "urls": [ - "/${VERSION}/drop-index.html" - ] - }, - { - "title": "DROP TABLE", - "urls": [ - "/${VERSION}/drop-table.html" - ] - }, - { - "title": "DROP VIEW", - "urls": [ - "/${VERSION}/drop-view.html" - ] - }, - { - "title": "EXPLAIN", - "urls": [ - "/${VERSION}/explain.html" - ] - }, - { - "title": "GRANT", - "urls": [ - "/${VERSION}/grant.html" - ] - }, - { - "title": "INSERT", - "urls": [ - "/${VERSION}/insert.html" - ] - }, - { - "title": "RENAME COLUMN", - "urls": [ - "/${VERSION}/rename-column.html" - ] - }, - { - "title": "RENAME DATABASE", - "urls": [ - "/${VERSION}/rename-database.html" - ] - }, - { - "title": "RENAME INDEX", - "urls": [ - "/${VERSION}/rename-index.html" - ] - }, - { - "title": "RENAME TABLE", - "urls": [ - "/${VERSION}/rename-table.html" - ] - }, - { - "title": "RELEASE SAVEPOINT", - "urls": [ - "/${VERSION}/release-savepoint.html" - ] - }, - { - "title": "RESTORE (Enterprise)", - "urls": [ - "/${VERSION}/restore.html" - ] - }, - { - "title": "REVOKE", - "urls": [ - "/${VERSION}/revoke.html" - ] - }, - { - "title": "ROLLBACK", - "urls": [ - "/${VERSION}/rollback-transaction.html" - ] - }, - { - "title": "SAVEPOINT", - "urls": [ - "/${VERSION}/savepoint.html" - ] - }, - { - "title": "SELECT", - "urls": [ - "/${VERSION}/select.html" - ] - }, - { - "title": "SET <session variable>", - "urls": [ - "/${VERSION}/set-vars.html" - ] - }, - { - "title": "SET CLUSTER SETTING", - "urls": [ - "/${VERSION}/set-cluster-setting.html" - ] - }, - { - "title": "SET TRANSACTION", - "urls": [ - "/${VERSION}/set-transaction.html" - ] - }, - { - "title": "SHOW <session variable>", - "urls": [ - "/${VERSION}/show-vars.html" - ] - }, - { - 
"title": "SHOW CLUSTER SETTING", - "urls": [ - "/${VERSION}/show-cluster-setting.html" - ] - }, - { - "title": "SHOW COLUMNS", - "urls": [ - "/${VERSION}/show-columns.html" - ] - }, - { - "title": "SHOW CONSTRAINTS", - "urls": [ - "/${VERSION}/show-constraints.html" - ] - }, - { - "title": "SHOW CREATE TABLE", - "urls": [ - "/${VERSION}/show-create-table.html" - ] - }, - { - "title": "SHOW CREATE VIEW", - "urls": [ - "/${VERSION}/show-create-view.html" - ] - }, - { - "title": "SHOW DATABASES", - "urls": [ - "/${VERSION}/show-databases.html" - ] - }, - { - "title": "SHOW GRANTS", - "urls": [ - "/${VERSION}/show-grants.html" - ] - }, - { - "title": "SHOW INDEX", - "urls": [ - "/${VERSION}/show-index.html" - ] - }, - { - "title": "SHOW TABLES", - "urls": [ - "/${VERSION}/show-tables.html" - ] - }, - { - "title": "SHOW USERS", - "urls": [ - "/${VERSION}/show-users.html" - ] - }, - { - "title": "TRUNCATE", - "urls": [ - "/${VERSION}/truncate.html" - ] - }, - { - "title": "UPDATE", - "urls": [ - "/${VERSION}/update.html" - ] - }, - { - "title": "UPSERT", - "urls": [ - "/${VERSION}/upsert.html" - ] - } - ] - }, - { - "title": "SQL Syntax", - "items": [ - { - "title": "Keywords & Identifiers", - "urls": [ - "/${VERSION}/keywords-and-identifiers.html" - ] - }, - { - "title": "Constants", - "urls": [ - "/${VERSION}/sql-constants.html" - ] - }, - { - "title": "Value Expressions", - "urls": [ - "/${VERSION}/sql-expressions.html" - ] - }, - { - "title": "Table Expressions", - "urls": [ - "/${VERSION}/table-expressions.html" - ] - }, - { - "title": "Name Resolution", - "urls": [ - "/${VERSION}/sql-name-resolution.html" - ] - }, - { - "title": "AS OF SYSTEM TIME", - "urls": [ - "/${VERSION}/as-of-system-time.html" - ] - }, - { - "title": "NULL Handling", - "urls": [ - "/${VERSION}/null-handling.html" - ] - }, - { - "title": "Full SQL Grammar", - "urls": [ - "/${VERSION}/sql-grammar.html" - ] - } - ] - }, - { - "title": "Constraints", - "items": [ - { - "title": "Overview", - "urls": [ - "/${VERSION}/constraints.html" - ] - }, - { - "title": "Check", - "urls": [ - "/${VERSION}/check.html" - ] - }, - { - "title": "Default Value", - "urls": [ - "/${VERSION}/default-value.html" - ] - }, - { - "title": "Foreign Key", - "urls": [ - "/${VERSION}/foreign-key.html" - ] - }, - { - "title": "Not Null", - "urls": [ - "/${VERSION}/not-null.html" - ] - }, - { - "title": "Primary Key", - "urls": [ - "/${VERSION}/primary-key.html" - ] - }, - { - "title": "Unique", - "urls": [ - "/${VERSION}/unique.html" - ] - } - ] - }, - { - "title": "Data Types", - "items": [ - { - "title": "Overview", - "urls": [ - "/${VERSION}/data-types.html" - ] - }, - { - "title": "INT", - "urls": [ - "/${VERSION}/int.html" - ] - }, - { - "title": "SERIAL", - "urls": [ - "/${VERSION}/serial.html" - ] - }, - { - "title": "DECIMAL", - "urls": [ - "/${VERSION}/decimal.html" - ] - }, - { - "title": "FLOAT", - "urls": [ - "/${VERSION}/float.html" - ] - }, - { - "title": "BOOL", - "urls": [ - "/${VERSION}/bool.html" - ] - }, - { - "title": "DATE", - "urls": [ - "/${VERSION}/date.html" - ] - }, - { - "title": "TIMESTAMP", - "urls": [ - "/${VERSION}/timestamp.html" - ] - }, - { - "title": "INTERVAL", - "urls": [ - "/${VERSION}/interval.html" - ] - }, - { - "title": "STRING", - "urls": [ - "/${VERSION}/string.html" - ] - }, - { - "title": "COLLATE", - "urls": [ - "/${VERSION}/collate.html" - ] - }, - { - "title": "BYTES", - "urls": [ - "/${VERSION}/bytes.html" - ] - } - ] - }, - { - "title": "Privileges", - "urls": [ - "/${VERSION}/privileges.html" - ] - 
}, - { - "title": "Functions and Operators", - "urls": [ - "/${VERSION}/functions-and-operators.html" - ] - }, - { - "title": "Transactions", - "urls": [ - "/${VERSION}/transactions.html" - ] - }, - { - "title": "Views", - "urls": [ - "/${VERSION}/views.html" - ] - }, - { - "title": "Window Functions", - "urls": [ - "/${VERSION}/window-functions.html" - ] - }, - { - "title": "Performance Optimization", - "items": [ - { - "title": "Indexes", - "urls": [ - "/${VERSION}/indexes.html" - ] - }, - { - "title": "Column Families", - "urls": [ - "/${VERSION}/column-families.html" - ] - }, - { - "title": "Interleaved Tables", - "urls": [ - "/${VERSION}/interleave-in-parent.html" - ] - } - ] - }, - { - "title": "Information Schema", - "urls": [ - "/${VERSION}/information-schema.html" - ] - }, - { - "title": "Porting Applications", - "items": [ - { - "title": "From PostgreSQL", - "urls": [ - "/${VERSION}/porting-postgres.html" - ] - } - ] - } - ] - }, - { - "title": "Deploy", - "items": [ - { - "title": "Recommended Production Settings", - "urls": [ - "/${VERSION}/recommended-production-settings.html" - ] - }, - { - "title": "Manual Deployment", - "urls": [ - "/${VERSION}/manual-deployment.html", - "/${VERSION}/manual-deployment-insecure.html" - ] - }, - { - "title": "Cloud Deployment", - "items": [ - { - "title": "Overview", - "urls": [ - "/${VERSION}/cloud-deployment.html" - ] - }, - { - "title": "AWS", - "urls": [ - "/${VERSION}/deploy-cockroachdb-on-aws.html", - "/${VERSION}/deploy-cockroachdb-on-aws-insecure.html" - ] - }, - { - "title": "Azure", - "urls": [ - "/${VERSION}/deploy-cockroachdb-on-microsoft-azure.html", - "/${VERSION}/deploy-cockroachdb-on-microsoft-azure-insecure.html" - ] - }, - { - "title": "Digital Ocean", - "urls": [ - "/${VERSION}/deploy-cockroachdb-on-digital-ocean.html", - "/${VERSION}/deploy-cockroachdb-on-digital-ocean-insecure.html" - ] - }, - { - "title": "Google Cloud Platform GCE", - "urls": [ - "/${VERSION}/deploy-cockroachdb-on-google-cloud-platform.html", - "/${VERSION}/deploy-cockroachdb-on-google-cloud-platform-insecure.html" - ] - } - ] - }, - { - "title": "Orchestration", - "items": [ - { - "title": "Overview", - "urls": [ - "/${VERSION}/orchestration.html" - ] - }, - { - "title": "Kubernetes", - "urls": [ - "/${VERSION}/orchestrate-cockroachdb-with-kubernetes.html" - ] - }, - { - "title": "Docker Swarm", - "urls": [ - "/${VERSION}/orchestrate-cockroachdb-with-docker-swarm.html", - "/${VERSION}/orchestrate-cockroachdb-with-docker-swarm-insecure.html" - ] - } - ] - }, - { - "title": "Cluster Settings", - "urls": [ - "/${VERSION}/cluster-settings.html" - ] - }, - { - "title": "Cockroach Commands", - "urls": [ - "/${VERSION}/cockroach-commands.html" - ] - }, - { - "title": "Start a Node", - "urls": [ - "/${VERSION}/start-a-node.html" - ] - }, - { - "title": "Create Security Certificates", - "urls": [ - "/${VERSION}/create-security-certificates.html" - ] - }, - { - "title": "Create & Manage Users", - "urls": [ - "/${VERSION}/create-and-manage-users.html" - ] - }, - { - "title": "Configure Replication Zones", - "urls": [ - "/${VERSION}/configure-replication-zones.html" - ] - } - ] - }, - { - "title": "Manage", - "items": [ - { - "title": "Explore the Admin UI", - "urls": [ - "/${VERSION}/explore-the-admin-ui.html" - ] - }, - { - "title": "Monitor CockroachDB with Prometheus", - "urls": [ - "/${VERSION}/monitor-cockroachdb-with-prometheus.html" - ] - }, - { - "title": "Upgrade a Cluster's Version", - "urls": [ - "/${VERSION}/upgrade-cockroach-version.html" - ] - }, - 
{ - "title": "Stop a Node", - "urls": [ - "/${VERSION}/stop-a-node.html" - ] - }, - { - "title": "Back up Data", - "urls": [ - "/${VERSION}/back-up-data.html" - ] - }, - { - "title": "Restore Data", - "urls": [ - "/${VERSION}/restore-data.html" - ] - }, - { - "title": "Dump/Export Schema or Data", - "urls": [ - "/${VERSION}/sql-dump.html" - ] - }, - { - "title": "Import Data", - "urls": [ - "/${VERSION}/import-data.html" - ] - }, - { - "title": "Generate CockroachDB Resources", - "urls": [ - "/${VERSION}/generate-cockroachdb-resources.html" - ] - }, - { - "title": "View Node Details", - "urls": [ - "/${VERSION}/view-node-details.html" - ] - }, - { - "title": "View Version Details", - "urls": [ - "/${VERSION}/view-version-details.html" - ] - }, - { - "title": "Diagnostics Reporting", - "urls": [ - "/${VERSION}/diagnostics-reporting.html" - ] - } - ] - }, - { - "title": "Troubleshoot", - "items": [ - { - "title": "Overview", - "urls": [ - "/${VERSION}/troubleshooting-overview.html" - ] - }, - { - "title": "Common Errors", - "urls": [ - "/${VERSION}/common-errors.html" - ] - }, - { - "title": "Cluster & Node Setup", - "urls": [ - "/${VERSION}/cluster-setup-troubleshooting.html" - ] - }, - { - "title": "Query Behavior", - "urls": [ - "/${VERSION}/query-behavior-troubleshooting.html" - ] - }, - { - "title": "Debug & Error Logs", - "urls": [ - "/${VERSION}/debug-and-error-logs.html" - ] - }, - { - "title": "Collect Cluster Debug Info", - "urls": [ - "/${VERSION}/debug-zip.html" - ] - }, - { - "title": "Support Resources", - "urls": [ - "/${VERSION}/support-resources.html" - ] - }, - { - "title": "File an Issue", - "urls": [ - "/${VERSION}/file-an-issue.html" - ] - } - ] - }, - { - "title": "Contribute", - "items": [ - { - "title": "Improve the Docs", - "urls": [ - "/${VERSION}/improve-the-docs.html" - ] - } - ] - }, - {% include sidebar-releases.json %}, - { - "title": "FAQs", - "items": [ - { - "title": "Product FAQs", - "urls": [ - "/${VERSION}/frequently-asked-questions.html" - ] - }, - { - "title": "SQL FAQs", - "urls": [ - "/${VERSION}/sql-faqs.html" - ] - }, - { - "title": "Operational FAQs", - "urls": [ - "/${VERSION}/operational-faqs.html" - ] - }, - { - "title": "CockroachDB in Comparison", - "urls": [ - "/${VERSION}/cockroachdb-in-comparison.html" - ] - }, - { - "title": "CockroachDB Architecture", - "urls": [ - "/${VERSION}/cockroachdb-architecture.html" - ] - }, - { - "title": "CockroachDB Features", - "items": [ - { - "title": "Simplified Deployment", - "urls": [ - "/${VERSION}/simplified-deployment.html" - ] - }, - { - "title": "Strong Consistency", - "urls": [ - "/${VERSION}/strong-consistency.html" - ] - }, - { - "title": "SQL", - "urls": [ - "/${VERSION}/sql.html" - ] - }, - { - "title": "Distributed Transactions", - "urls": [ - "/${VERSION}/distributed-transactions.html" - ] - }, - { - "title": "Automated Scaling & Repair", - "urls": [ - "/${VERSION}/automated-scaling-and-repair.html" - ] - }, - { - "title": "High Availability", - "urls": [ - "/${VERSION}/high-availability.html" - ] - }, - { - "title": "Open Source", - "urls": [ - "/${VERSION}/open-source.html" - ] - }, - { - "title": "Go Implementation", - "urls": [ - "/${VERSION}/go-implementation.html" - ] - } - ] - } - ] - } - ] - } -] diff --git a/src/current/_includes/v1.0/app/BasicSample.java b/src/current/_includes/v1.0/app/BasicSample.java deleted file mode 100644 index c6466fb02f8..00000000000 --- a/src/current/_includes/v1.0/app/BasicSample.java +++ /dev/null @@ -1,34 +0,0 @@ -import java.sql.*; - -/* -You can 
compile and run this example with a command like: - javac BasicSample.java && java -cp .:~/path/to/postgresql-9.4.1208.jar BasicSample -You can download the postgres JDBC driver jar from https://jdbc.postgresql.org. -*/ -public class BasicSample { - public static void main(String[] args) throws ClassNotFoundException, SQLException { - // Load the postgres JDBC driver. - Class.forName("org.postgresql.Driver"); - - // Connect to the "bank" database. - Connection db = DriverManager.getConnection("jdbc:postgresql://127.0.0.1:26257/bank?sslmode=disable", "maxroach", ""); - - try { - // Create the "accounts" table. - db.createStatement().execute("CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT)"); - - // Insert two rows into the "accounts" table. - db.createStatement().execute("INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)"); - - // Print out the balances. - System.out.println("Initial balances:"); - ResultSet res = db.createStatement().executeQuery("SELECT id, balance FROM accounts"); - while (res.next()) { - System.out.printf("\taccount %s: %s\n", res.getInt("id"), res.getInt("balance")); - } - } finally { - // Close the database connection. - db.close(); - } - } -} diff --git a/src/current/_includes/v1.0/app/TxnSample.java b/src/current/_includes/v1.0/app/TxnSample.java deleted file mode 100644 index 4f965d6e1a3..00000000000 --- a/src/current/_includes/v1.0/app/TxnSample.java +++ /dev/null @@ -1,113 +0,0 @@ -import java.sql.*; - -/* - You can compile and run this example with a command like: - javac TxnSample.java && java -cp .:~/path/to/postgresql-9.4.1208.jar TxnSample - You can download the postgres JDBC driver jar from https://jdbc.postgresql.org. -*/ - -// Ambiguous whether the transaction committed or not. -class AmbiguousCommitException extends SQLException{ - public AmbiguousCommitException(Throwable cause) { - super(cause); - } -} -class InsufficientBalanceException extends Exception {} -class AccountNotFoundException extends Exception { - public int account; - public AccountNotFoundException(int account) { - this.account = account; - } -} - -// A simple interface that provides a retryable lambda expression. -interface RetryableTransaction { - public void run(Connection conn) - throws SQLException, InsufficientBalanceException, AccountNotFoundException, AmbiguousCommitException; -} - -public class TxnSample { - public static RetryableTransaction transferFunds(int from, int to, int amount) { - return new RetryableTransaction() { - public void run(Connection conn) - throws SQLException, InsufficientBalanceException, AccountNotFoundException, AmbiguousCommitException { - // Check the current balance. - ResultSet res = conn.createStatement().executeQuery("SELECT balance FROM accounts WHERE id = " + from); - if(!res.next()) { - throw new AccountNotFoundException(from); - } - int balance = res.getInt("balance"); - if(balance < from) { - throw new InsufficientBalanceException(); - } - // Perform the transfer. 
- conn.createStatement().executeUpdate("UPDATE accounts SET balance = balance - " + amount + " where id = " + from); - conn.createStatement().executeUpdate("UPDATE accounts SET balance = balance + " + amount + " where id = " + to); - } - }; - } - - public static void retryTransaction(Connection conn, RetryableTransaction tx) - throws SQLException, InsufficientBalanceException, AccountNotFoundException, AmbiguousCommitException { - Savepoint sp = conn.setSavepoint("cockroach_restart"); - while(true) { - boolean releaseAttempted = false; - try { - tx.run(conn); - releaseAttempted = true; - conn.releaseSavepoint(sp); - } - catch(SQLException e) { - String sqlState = e.getSQLState(); - // Check if the error code indicates a SERIALIZATION_FAILURE. - if(sqlState.equals("40001")) { - // Signal the database that we will attempt a retry. - conn.rollback(sp); - continue; - } else if(releaseAttempted) { - throw new AmbiguousCommitException(e); - } else { - throw e; - } - } - break; - } - conn.commit(); - } - - public static void main(String[] args) throws ClassNotFoundException, SQLException { - // Load the postgres JDBC driver. - Class.forName("org.postgresql.Driver"); - - // Connect to the "bank" database. - Connection db = DriverManager.getConnection("jdbc:postgresql://127.0.0.1:26257/bank?sslmode=disable", "maxroach", ""); - try { - // We need to turn off autocommit mode to allow for - // multi-statement transactions. - db.setAutoCommit(false); - // Perform the transfer. This assumes the table has - // already been set up as in the "Build a Test App" - // tutorial. - RetryableTransaction transfer = transferFunds(1, 2, 100); - retryTransaction(db, transfer); - - // Check balances after transfer. - db.setAutoCommit(true); - ResultSet res = db.createStatement().executeQuery("SELECT id, balance FROM accounts"); - while (res.next()) { - System.out.printf("\taccount %s: %s\n", res.getInt("id"), res.getInt("balance")); - } - } catch(InsufficientBalanceException e) { - System.out.println("Insufficient balance"); - } catch(AccountNotFoundException e) { - System.out.println("No users in the table with id " + e.account); - } catch(AmbiguousCommitException e) { - System.out.println("Ambiguous result encountered: " + e); - } catch(SQLException e) { - System.out.println("SQLException encountered:" + e); - } finally { - // Close the database connection. - db.close(); - } - } -} diff --git a/src/current/_includes/v1.0/app/activerecord-basic-sample.rb b/src/current/_includes/v1.0/app/activerecord-basic-sample.rb deleted file mode 100644 index 35bccdcb7e8..00000000000 --- a/src/current/_includes/v1.0/app/activerecord-basic-sample.rb +++ /dev/null @@ -1,45 +0,0 @@ -require 'active_record' -require 'pg' -require 'activerecord-cockroachdb-adapter' - -# Connect to CockroachDB through ActiveRecord. -# In Rails, this configuration would go in config/database.yml as usual. -ActiveRecord::Base.establish_connection( - adapter: 'cockroachdb', - username: 'maxroach', - password: '', - database: 'bank', - host: 'localhost', - port: 26257, -) - - -# Define the Account model. -# In Rails, this would go in app/models/ as usual. -class Account < ActiveRecord::Base - validates :id, presence: true - validates :balance, presence: true -end - -# Define a migration for the accounts table. -# In Rails, this would go in db/migrate/ as usual. -class Schema < ActiveRecord::Migration - def change - create_table :accounts, force: true do |t| - t.integer :balance - end - end -end - -# Run the schema migration by hand. 
-# In Rails, this would be done via rake db:migrate as usual. -Schema.new.change() - -# Create two accounts, inserting two rows into the accounts table. -Account.create(id: 1, balance: 1000) -Account.create(id: 2, balance: 250) - -# Retrieve accounts and print out the balances -Account.all.each do |acct| - puts "#{acct.id} #{acct.balance}" -end diff --git a/src/current/_includes/v1.0/app/basic-sample.c b/src/current/_includes/v1.0/app/basic-sample.c deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/current/_includes/v1.0/app/basic-sample.clj b/src/current/_includes/v1.0/app/basic-sample.clj deleted file mode 100644 index b139d27b8e1..00000000000 --- a/src/current/_includes/v1.0/app/basic-sample.clj +++ /dev/null @@ -1,31 +0,0 @@ -(ns test.test - (:require [clojure.java.jdbc :as j] - [test.util :as util])) - -;; Define the connection parameters to the cluster. -(def db-spec {:subprotocol "postgresql" - :subname "//localhost:26257/bank" - :user "maxroach" - :password ""}) - -(defn test-basic [] - ;; Connect to the cluster and run the code below with - ;; the connection object bound to 'conn'. - (j/with-db-connection [conn db-spec] - - ;; Insert two rows into the "accounts" table. - (j/insert! conn :accounts {:id 1 :balance 1000}) - (j/insert! conn :accounts {:id 2 :balance 250}) - - ;; Print out the balances. - (println "Initial balances:") - (->> (j/query conn ["SELECT id, balance FROM accounts"]) - (map println) - doall) - - ;; The database connection is automatically closed by with-db-connection. - )) - - -(defn -main [& args] - (test-basic)) diff --git a/src/current/_includes/v1.0/app/basic-sample.cpp b/src/current/_includes/v1.0/app/basic-sample.cpp deleted file mode 100644 index 0cdb6f65bfd..00000000000 --- a/src/current/_includes/v1.0/app/basic-sample.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// Build with g++ -std=c++11 basic-sample.cpp -lpq -lpqxx - -#include -#include -#include -#include -#include -#include - -using namespace std; - -int main() { - try { - // Connect to the "bank" database. - pqxx::connection c("postgresql://maxroach@localhost:26257/bank"); - - pqxx::nontransaction w(c); - - // Create the "accounts" table. - w.exec("CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT)"); - - // Insert two rows into the "accounts" table. - w.exec("INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)"); - - // Print out the balances. - cout << "Initial balances:" << endl; - pqxx::result r = w.exec("SELECT id, balance FROM accounts"); - for (auto row : r) { - cout << row[0].as() << ' ' << row[1].as() << endl; - } - - w.commit(); // Note this doesn't doesn't do anything - // for a nontransaction, but is still required. - } - catch (const exception &e) { - cerr << e.what() << endl; - return 1; - } - cout << "Success" << endl; - return 0; -} diff --git a/src/current/_includes/v1.0/app/basic-sample.go b/src/current/_includes/v1.0/app/basic-sample.go deleted file mode 100644 index 6a647f51641..00000000000 --- a/src/current/_includes/v1.0/app/basic-sample.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - - _ "github.com/lib/pq" -) - -func main() { - // Connect to the "bank" database. - db, err := sql.Open("postgres", "postgresql://maxroach@localhost:26257/bank?sslmode=disable") - if err != nil { - log.Fatal("error connecting to the database: ", err) - } - - // Create the "accounts" table. 
- if _, err := db.Exec( - "CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT)"); err != nil { - log.Fatal(err) - } - - // Insert two rows into the "accounts" table. - if _, err := db.Exec( - "INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)"); err != nil { - log.Fatal(err) - } - - // Print out the balances. - rows, err := db.Query("SELECT id, balance FROM accounts") - if err != nil { - log.Fatal(err) - } - defer rows.Close() - fmt.Println("Initial balances:") - for rows.Next() { - var id, balance int - if err := rows.Scan(&id, &balance); err != nil { - log.Fatal(err) - } - fmt.Printf("%d %d\n", id, balance) - } -} diff --git a/src/current/_includes/v1.0/app/basic-sample.js b/src/current/_includes/v1.0/app/basic-sample.js deleted file mode 100644 index 2fd656eac23..00000000000 --- a/src/current/_includes/v1.0/app/basic-sample.js +++ /dev/null @@ -1,55 +0,0 @@ -var async = require('async'); - -// Require the driver. -var pg = require('pg'); - -// Connect to the "bank" database. -var config = { - user: 'maxroach', - host: 'localhost', - database: 'bank', - port: 26257 -}; - -// Create a pool. -var pool = new pg.Pool(config); - -pool.connect(function (err, client, done) { - // Closes communication with the database and exits. - var finish = function () { - done(); - process.exit(); - }; - - if (err) { - console.error('could not connect to cockroachdb', err); - finish(); - } - async.waterfall([ - function (next) { - // Create the "accounts" table. - client.query('CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT);', next); - }, - function (results, next) { - // Insert two rows into the "accounts" table. - client.query('INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250);', next); - }, - function (results, next) { - // Print out the balances. - client.query('SELECT id, balance FROM accounts;', next); - }, - ], - function (err, results) { - if (err) { - console.error('error inserting into and selecting from accounts', err); - finish(); - } - - console.log('Initial balances:'); - results.rows.forEach(function (row) { - console.log(row); - }); - - finish(); - }); -}); diff --git a/src/current/_includes/v1.0/app/basic-sample.php b/src/current/_includes/v1.0/app/basic-sample.php deleted file mode 100644 index db5a26e3111..00000000000 --- a/src/current/_includes/v1.0/app/basic-sample.php +++ /dev/null @@ -1,20 +0,0 @@ - PDO::ERRMODE_EXCEPTION, - PDO::ATTR_EMULATE_PREPARES => true, - PDO::ATTR_PERSISTENT => true - )); - - $dbh->exec('INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)'); - - print "Account balances:\r\n"; - foreach ($dbh->query('SELECT id, balance FROM accounts') as $row) { - print $row['id'] . ': ' . $row['balance'] . "\r\n"; - } -} catch (Exception $e) { - print $e->getMessage() . "\r\n"; - exit(1); -} -?> diff --git a/src/current/_includes/v1.0/app/basic-sample.py b/src/current/_includes/v1.0/app/basic-sample.py deleted file mode 100644 index 82257837402..00000000000 --- a/src/current/_includes/v1.0/app/basic-sample.py +++ /dev/null @@ -1,28 +0,0 @@ -# Import the driver. -import psycopg2 - -# Connect to the "bank" database. -conn = psycopg2.connect(database='bank', user='maxroach', host='localhost', port=26257) - -# Make each statement commit immediately. -conn.set_session(autocommit=True) - -# Open a cursor to perform database operations. -cur = conn.cursor() - -# Create the "accounts" table. 
-cur.execute("CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT)") - -# Insert two rows into the "accounts" table. -cur.execute("INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)") - -# Print out the balances. -cur.execute("SELECT id, balance FROM accounts") -rows = cur.fetchall() -print('Initial balances:') -for row in rows: - print([str(cell) for cell in row]) - -# Close the database connection. -cur.close() -conn.close() diff --git a/src/current/_includes/v1.0/app/basic-sample.rb b/src/current/_includes/v1.0/app/basic-sample.rb deleted file mode 100644 index 7ffa928066d..00000000000 --- a/src/current/_includes/v1.0/app/basic-sample.rb +++ /dev/null @@ -1,22 +0,0 @@ -# Import the driver. -require 'pg' - -# Connect to the "bank" database. -conn = PG.connect(user: 'maxroach', dbname: 'bank', host: 'localhost', port: 26257) - -# Create the "accounts" table. -conn.exec('CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT)') - -# Insert two rows into the "accounts" table. -conn.exec('INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)') - -# Print out the balances. -puts 'Initial balances:' -conn.exec('SELECT id, balance FROM accounts') do |res| - res.each do |row| - puts row - end -end - -# Close communication with the database. -conn.close() diff --git a/src/current/_includes/v1.0/app/basic-sample.rs b/src/current/_includes/v1.0/app/basic-sample.rs deleted file mode 100644 index f381d500028..00000000000 --- a/src/current/_includes/v1.0/app/basic-sample.rs +++ /dev/null @@ -1,22 +0,0 @@ -extern crate postgres; - -use postgres::{Connection, TlsMode}; - -fn main() { - let conn = Connection::connect("postgresql://maxroach@localhost:26257/bank", TlsMode::None) - .unwrap(); - - // Insert two rows into the "accounts" table. - conn.execute( - "INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)", - &[], - ).unwrap(); - - // Print out the balances. - println!("Initial balances:"); - for row in &conn.query("SELECT id, balance FROM accounts", &[]).unwrap() { - let id: i64 = row.get(0); - let balance: i64 = row.get(1); - println!("{} {}", id, balance); - } -} diff --git a/src/current/_includes/v1.0/app/common-steps.md b/src/current/_includes/v1.0/app/common-steps.md deleted file mode 100644 index 76dfe6a008c..00000000000 --- a/src/current/_includes/v1.0/app/common-steps.md +++ /dev/null @@ -1,36 +0,0 @@ -## Step 2. Start a single-node cluster - -For the purpose of this tutorial, you need only one CockroachDB node running in insecure mode: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start \ ---insecure \ ---store=hello-1 \ ---host=localhost -~~~ - -## Step 3. Create a user - -In a new terminal, as the `root` user, use the [`cockroach user`](create-and-manage-users.html) command to create a new user, `maxroach`. - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach user set maxroach --insecure -~~~ - -## Step 4. Create a database and grant privileges - -As the `root` user, use the [built-in SQL client](use-the-built-in-sql-client.html) to create a `bank` database. - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'CREATE DATABASE bank' -~~~ - -Then [grant privileges](grant.html) to the `maxroach` user. 
- -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'GRANT ALL ON DATABASE bank TO maxroach' -~~~ diff --git a/src/current/_includes/v1.0/app/gorm-basic-sample.go b/src/current/_includes/v1.0/app/gorm-basic-sample.go deleted file mode 100644 index b8529962c2b..00000000000 --- a/src/current/_includes/v1.0/app/gorm-basic-sample.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "fmt" - "log" - - // Import GORM-related packages. - "github.com/jinzhu/gorm" - _ "github.com/jinzhu/gorm/dialects/postgres" -) - -// Account is our model, which corresponds to the "accounts" database table. -type Account struct { - ID int `gorm:"primary_key"` - Balance int -} - -func main() { - // Connect to the "bank" database as the "maxroach" user. - const addr = "postgresql://maxroach@localhost:26257/bank?sslmode=disable" - db, err := gorm.Open("postgres", addr) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - // Automatically create the "accounts" table based on the Account model. - db.AutoMigrate(&Account{}) - - // Insert two rows into the "accounts" table. - db.Create(&Account{ID: 1, Balance: 1000}) - db.Create(&Account{ID: 2, Balance: 250}) - - // Print out the balances. - var accounts []Account - db.Find(&accounts) - fmt.Println("Initial balances:") - for _, account := range accounts { - fmt.Printf("%d %d\n", account.ID, account.Balance) - } -} diff --git a/src/current/_includes/v1.0/app/hibernate-basic-sample/Sample.java b/src/current/_includes/v1.0/app/hibernate-basic-sample/Sample.java deleted file mode 100644 index ed36ae15ad3..00000000000 --- a/src/current/_includes/v1.0/app/hibernate-basic-sample/Sample.java +++ /dev/null @@ -1,64 +0,0 @@ -package com.cockroachlabs; - -import org.hibernate.Session; -import org.hibernate.SessionFactory; -import org.hibernate.cfg.Configuration; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.Id; -import javax.persistence.Table; -import javax.persistence.criteria.CriteriaQuery; - -public class Sample { - // Create a SessionFactory based on our hibernate.cfg.xml configuration - // file, which defines how to connect to the database. - private static final SessionFactory sessionFactory = - new Configuration() - .configure("hibernate.cfg.xml") - .addAnnotatedClass(Account.class) - .buildSessionFactory(); - - // Account is our model, which corresponds to the "accounts" database table. - @Entity - @Table(name="accounts") - public static class Account { - @Id - @Column(name="id") - public long id; - - @Column(name="balance") - public long balance; - - // Convenience constructor. - public Account(int id, int balance) { - this.id = id; - this.balance = balance; - } - - // Hibernate needs a default (no-arg) constructor to create model objects. - public Account() {} - } - - public static void main(String[] args) throws Exception { - Session session = sessionFactory.openSession(); - - try { - // Insert two rows into the "accounts" table. - session.beginTransaction(); - session.save(new Account(1, 1000)); - session.save(new Account(2, 250)); - session.getTransaction().commit(); - - // Print out the balances. 
- CriteriaQuery query = session.getCriteriaBuilder().createQuery(Account.class); - query.select(query.from(Account.class)); - for (Account account : session.createQuery(query).getResultList()) { - System.out.printf("%d %d\n", account.id, account.balance); - } - } finally { - session.close(); - sessionFactory.close(); - } - } -} diff --git a/src/current/_includes/v1.0/app/hibernate-basic-sample/build.gradle b/src/current/_includes/v1.0/app/hibernate-basic-sample/build.gradle deleted file mode 100644 index 6f95f22a964..00000000000 --- a/src/current/_includes/v1.0/app/hibernate-basic-sample/build.gradle +++ /dev/null @@ -1,16 +0,0 @@ -group 'com.cockroachlabs' -version '1.0' - -apply plugin: 'java' -apply plugin: 'application' - -mainClassName = 'com.cockroachlabs.Sample' - -repositories { - mavenCentral() -} - -dependencies { - compile 'org.hibernate:hibernate-core:5.2.4.Final' - compile 'org.postgresql:postgresql:9.4.1208' -} diff --git a/src/current/_includes/v1.0/app/hibernate-basic-sample/hibernate-basic-sample.tgz b/src/current/_includes/v1.0/app/hibernate-basic-sample/hibernate-basic-sample.tgz deleted file mode 100644 index d0e195b389016449e56115932da7d9858002e366..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1613 zcmV-T2D14diwFRcF{4-j1MOMsbJ{o%?`QrBm6;AO6J+CHX!D?tB%RAlr|BQqgqixi8e%mDIcYxL{#$u+i0|A zpWKhx^*=(BZ|8rQ@Q~uMA~a+V)&cS8b-UI1?{@ui{yo?0b)F#i5&XN8zVrN#l}uvP z2xNo<8GKTb1e1^qb)$C8l;%>PMhCl%T8+eUc!T0F8FO)f8Xw6uX)OO}zw&?>5Ce&b zaB&h6t>2S~q2q5T!Ivr8fLF0tYHpCfT8!tD!?=y>k)25A1Llk_)I zMg40PJl+|w@$d9ELJc0n-lv!TstVp8sCQ_qx0MZ-X|^|BMp{qcQ#x zg&XU*qyO*t{&N1i{cdm9|8IkiUP2~O-s>Nyy*`2tTT(8@$Bo2{+Rqy=2eqT8r$3%t z{c>@J7F%mE;^Rc6{XV+*`OUlYleD|GRBt=Z4+ZP|4xN;QPOnbUK6V}F?0tPMIx!|b zaGdFMI&bMQbm|T@)Tz+lhHWsc*TL2bmO-lFNHcAHZI4I*^Ty3!ruXP+yNynXAy5en zMg$>PkXoskF%S-o+U*7ISjm_vbAtqoH^=okKcrBr>{B0 zDy$Da(qXVTp*T**>x0)W?i_eM&+j`!B0jy;I*cS`$CPU_4B34(X&btT)dxySvnX7S z)8&VEh;kBw*xInYc3O=^5eBuPll04X^2T;WPOmSS`Orqvx20fDs&G1 zwIk$4{U40W59tB#-~V;I&hGr*3T>SK(up<(m<+Yb-&zu)ig=D)4bU(A2@;tt!@ zyLH^r|8-rc`C0$x?L+>1Zoj+h|F%If2|kgrZFiR5%^ucjJc^|2Q=H(F z{N_W#tYft}8HPMSX_JSta>(CFI}SR@niGP`@j$jqp|(GPnF_58RH)Q56nl+$$o9~5 z!h;EPGb1jTMiV(jMixD)ymu ztrvs~vstsg;#0jshEV!i2nn_^n*K@9d>sTb5eBCbt&mFpmMZ(uY#r8$P~}@7uG0Y9 zHaILo7f1zdb1VhbMZD@HxV(6DlPSgVj3l8^vAD+Ke^1!;>(DXy8|7S$Fw&=w~)fOvU_ z-1H{5fuwuLUh@rG3;tL#6Rt6*=r}cXWhIw_<(Wan`fbUy`FJQ8gZvN+bdrPyHHB;w zHEu0hoFYFfur$L+4nIN_=t@t(?AzLc)^ftGC3s-Zy5^-(Fem)_I|7Zm1wJS_)#&U? 
zz~XccoI9UGu&5vKuv|$P1_hBb%>bIl1*)tZLva6Gz|aL%Je4Tb2oYZa=mIVa%2nba z8*_1`h|nZ3xx#PpX>tt+hN3Rw(%C~DIICKl@_T#8^DEiM%#@6P$3hfwQ*u9d_(EAw z$;6~y=HSp}?@G_xquhi6`fR_><88GToNw&IfhnswMMo;R^jImIo^FOUvtws7`!Z58 zT9R238UX?e4wNSd9n1_`)SYhGvNrz2G{E^CoGQ)Mauub3x`Fd1!VnT1+u)3v^=A}4 zqrZvzUZDirYAo)HiY(i9J1?;8fgdizZDwXDHLE~M0pb;&qKf|4|A_8rM?2cl_d$OG LxhPj{04M+enCvV6 diff --git a/src/current/_includes/v1.0/app/hibernate-basic-sample/hibernate.cfg.xml b/src/current/_includes/v1.0/app/hibernate-basic-sample/hibernate.cfg.xml deleted file mode 100644 index 9005c15f7cc..00000000000 --- a/src/current/_includes/v1.0/app/hibernate-basic-sample/hibernate.cfg.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - org.postgresql.Driver - jdbc:postgresql://127.0.0.1:26257/bank?sslmode=disable - root - - - org.hibernate.dialect.PostgreSQL94Dialect - - - create - - diff --git a/src/current/_includes/v1.0/app/project.clj b/src/current/_includes/v1.0/app/project.clj deleted file mode 100644 index 41efc324b59..00000000000 --- a/src/current/_includes/v1.0/app/project.clj +++ /dev/null @@ -1,7 +0,0 @@ -(defproject test "0.1" - :description "CockroachDB test" - :url "http://cockroachlabs.com/" - :dependencies [[org.clojure/clojure "1.8.0"] - [org.clojure/java.jdbc "0.6.1"] - [org.postgresql/postgresql "9.4.1211"]] - :main test.test) diff --git a/src/current/_includes/v1.0/app/sequelize-basic-sample.js b/src/current/_includes/v1.0/app/sequelize-basic-sample.js deleted file mode 100644 index ca92b98e375..00000000000 --- a/src/current/_includes/v1.0/app/sequelize-basic-sample.js +++ /dev/null @@ -1,35 +0,0 @@ -var Sequelize = require('sequelize-cockroachdb'); - -// Connect to CockroachDB through Sequelize. -var sequelize = new Sequelize('bank', 'maxroach', '', { - dialect: 'postgres', - port: 26257, - logging: false -}); - -// Define the Account model for the "accounts" table. -var Account = sequelize.define('accounts', { - id: { type: Sequelize.INTEGER, primaryKey: true }, - balance: { type: Sequelize.INTEGER } -}); - -// Create the "accounts" table. -Account.sync({force: true}).then(function() { - // Insert two rows into the "accounts" table. - return Account.bulkCreate([ - {id: 1, balance: 1000}, - {id: 2, balance: 250} - ]); -}).then(function() { - // Retrieve accounts. - return Account.findAll(); -}).then(function(accounts) { - // Print out the balances. - accounts.forEach(function(account) { - console.log(account.id + ' ' + account.balance); - }); - process.exit(0); -}).catch(function(err) { - console.error('error: ' + err.message); - process.exit(1); -}); diff --git a/src/current/_includes/v1.0/app/sqlalchemy-basic-sample.py b/src/current/_includes/v1.0/app/sqlalchemy-basic-sample.py deleted file mode 100644 index 8948d04c92f..00000000000 --- a/src/current/_includes/v1.0/app/sqlalchemy-basic-sample.py +++ /dev/null @@ -1,32 +0,0 @@ -from __future__ import print_function -from sqlalchemy import create_engine, Column, Integer -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker - -Base = declarative_base() - -# The Account class corresponds to the "accounts" database table. -class Account(Base): - __tablename__ = 'accounts' - id = Column(Integer, primary_key=True) - balance = Column(Integer) - -# Create an engine to communicate with the database. The "cockroachdb://" prefix -# for the engine URL indicates that we are connecting to CockroachDB. 
-engine = create_engine("cockroachdb://maxroach@localhost:26257/bank?sslmode=disable") -Session = sessionmaker(bind=engine) - -# Automatically create the "accounts" table based on the Account class. -Base.metadata.create_all(engine) - -# Insert two rows into the "accounts" table. -session = Session() -session.add_all([ - Account(id=1, balance=1000), - Account(id=2, balance=250), -]) -session.commit() - -# Print out the balances. -for account in session.query(Account): - print(account.id, account.balance) diff --git a/src/current/_includes/v1.0/app/txn-sample.clj b/src/current/_includes/v1.0/app/txn-sample.clj deleted file mode 100644 index 75ee7b4ba62..00000000000 --- a/src/current/_includes/v1.0/app/txn-sample.clj +++ /dev/null @@ -1,43 +0,0 @@ -(ns test.test - (:require [clojure.java.jdbc :as j] - [test.util :as util])) - -;; Define the connection parameters to the cluster. -(def db-spec {:subprotocol "postgresql" - :subname "//localhost:26257/bank" - :user "maxroach" - :password ""}) - -;; The transaction we want to run. -(defn transferFunds - [txn from to amount] - - ;; Check the current balance. - (let [fromBalance (->> (j/query txn ["SELECT balance FROM accounts WHERE id = ?" from]) - (mapv :balance) - (first))] - (when (< fromBalance amount) - (throw (Exception. "Insufficient funds")))) - - ;; Perform the transfer. - (j/execute! txn [(str "UPDATE accounts SET balance = balance - " amount " WHERE id = " from)]) - (j/execute! txn [(str "UPDATE accounts SET balance = balance + " amount " WHERE id = " to)])) - -(defn test-txn [] - ;; Connect to the cluster and run the code below with - ;; the connection object bound to 'conn'. - (j/with-db-connection [conn db-spec] - - ;; Execute the transaction within an automatic retry block; - ;; the transaction object is bound to 'txn'. - (util/with-txn-retry [txn conn] - (transferFunds txn 1 2 100)) - - ;; Execute a query outside of an automatic retry block. - (println "Balances after transfer:") - (->> (j/query conn ["SELECT id, balance FROM accounts"]) - (map println) - (doall)))) - -(defn -main [& args] - (test-txn)) diff --git a/src/current/_includes/v1.0/app/txn-sample.cpp b/src/current/_includes/v1.0/app/txn-sample.cpp deleted file mode 100644 index dcdf0ca973d..00000000000 --- a/src/current/_includes/v1.0/app/txn-sample.cpp +++ /dev/null @@ -1,76 +0,0 @@ -// Build with g++ -std=c++11 txn-sample.cpp -lpq -lpqxx - -#include -#include -#include -#include -#include -#include - -using namespace std; - -void transferFunds( - pqxx::dbtransaction *tx, int from, int to, int amount) { - // Read the balance. - pqxx::result r = tx->exec( - "SELECT balance FROM accounts WHERE id = " + to_string(from)); - assert(r.size() == 1); - int fromBalance = r[0][0].as(); - - if (fromBalance < amount) { - throw domain_error("insufficient funds"); - } - - // Perform the transfer. - tx->exec("UPDATE accounts SET balance = balance - " - + to_string(amount) + " WHERE id = " + to_string(from)); - tx->exec("UPDATE accounts SET balance = balance + " - + to_string(amount) + " WHERE id = " + to_string(to)); -} - - -// ExecuteTx runs fn inside a transaction and retries it as needed. -// On non-retryable failures, the transaction is aborted and rolled -// back; on success, the transaction is committed. -// -// For more information about CockroachDB's transaction model see -// https://cockroachlabs.com/docs/transactions.html. -// -// NOTE: the supplied exec closure should not have external side -// effects beyond changes to the database. 
-void executeTx( - pqxx::connection *c, function fn) { - pqxx::work tx(*c); - while (true) { - try { - pqxx::subtransaction s(tx, "cockroach_restart"); - fn(&s); - s.commit(); - break; - } catch (const pqxx::pqxx_exception& e) { - // Swallow "transaction restart" errors; the transaction will be retried. - // Unfortunately libpqxx doesn't give us access to the error code, so we - // do string matching to identify retriable errors. - if (string(e.base().what()).find("restart transaction:") == string::npos) { - throw; - } - } - } - tx.commit(); -} - -int main() { - try { - pqxx::connection c("postgresql://maxroach@localhost:26257/bank"); - - executeTx(&c, [](pqxx::dbtransaction *tx) { - transferFunds(tx, 1, 2, 100); - }); - } - catch (const exception &e) { - cerr << e.what() << endl; - return 1; - } - cout << "Success" << endl; - return 0; -} diff --git a/src/current/_includes/v1.0/app/txn-sample.go b/src/current/_includes/v1.0/app/txn-sample.go deleted file mode 100644 index 2c0cd1b6da6..00000000000 --- a/src/current/_includes/v1.0/app/txn-sample.go +++ /dev/null @@ -1,51 +0,0 @@ -package main - -import ( - "context" - "database/sql" - "fmt" - "log" - - "github.com/cockroachdb/cockroach-go/crdb" -) - -func transferFunds(tx *sql.Tx, from int, to int, amount int) error { - // Read the balance. - var fromBalance int - if err := tx.QueryRow( - "SELECT balance FROM accounts WHERE id = $1", from).Scan(&fromBalance); err != nil { - return err - } - - if fromBalance < amount { - return fmt.Errorf("insufficient funds") - } - - // Perform the transfer. - if _, err := tx.Exec( - "UPDATE accounts SET balance = balance - $1 WHERE id = $2", amount, from); err != nil { - return err - } - if _, err := tx.Exec( - "UPDATE accounts SET balance = balance + $1 WHERE id = $2", amount, to); err != nil { - return err - } - return nil -} - -func main() { - db, err := sql.Open("postgres", "postgresql://maxroach@localhost:26257/bank?sslmode=disable") - if err != nil { - log.Fatal("error connecting to the database: ", err) - } - - // Run a transfer in a transaction. - err = crdb.ExecuteTx(context.Background(), db, nil, func(tx *sql.Tx) error { - return transferFunds(tx, 1 /* from acct# */, 2 /* to acct# */, 100 /* amount */) - }) - if err == nil { - fmt.Println("Success") - } else { - log.Fatal("error: ", err) - } -} diff --git a/src/current/_includes/v1.0/app/txn-sample.js b/src/current/_includes/v1.0/app/txn-sample.js deleted file mode 100644 index 6e481507b01..00000000000 --- a/src/current/_includes/v1.0/app/txn-sample.js +++ /dev/null @@ -1,141 +0,0 @@ -var async = require('async'); - -// Require the driver. -var pg = require('pg'); - -// Connect to the cluster. -var config = { - user: 'maxroach', - host: 'localhost', - database: 'bank', - port: 26257 -}; - -// Wrapper for a transaction. -// This automatically re-calls "op" with the client as an argument as -// long as the database server asks for the transaction to be retried. -function txnWrapper(client, op, next) { - client.query('BEGIN; SAVEPOINT cockroach_restart', function (err) { - if (err) { - return next(err); - } - - var released = false; - async.doWhilst(function (done) { - var handleError = function (err) { - // If we got an error, see if it's a retryable one and, if so, restart. - if (err.code === '40001') { - // Signal the database that we'll retry. - return client.query('ROLLBACK TO SAVEPOINT cockroach_restart', done); - } - // A non-retryable error; break out of the doWhilst with an error. - return done(err); - }; - - // Attempt the work. 
- op(client, function (err) { - if (err) { - return handleError(err); - } - var opResults = arguments; - - // If we reach this point, release and commit. - client.query('RELEASE SAVEPOINT cockroach_restart', function (err) { - if (err) { - return handleError(err); - } - released = true; - return done.apply(null, opResults); - }); - }); - }, - function () { - return !released; - }, - function (err) { - if (err) { - client.query('ROLLBACK', function () { - next(err); - }); - } else { - var txnResults = arguments; - client.query('COMMIT', function(err) { - if (err) { - return next(err); - } else { - return next.apply(null, txnResults); - } - }); - } - }); - }); -} - -// The transaction we want to run. -function transferFunds(client, from, to, amount, next) { - // Check the current balance. - client.query('SELECT balance FROM accounts WHERE id = $1', [from], function (err, results) { - if (err) { - return next(err); - } else if (results.rows.length === 0) { - return next(new Error('account not found in table')); - } - - var acctBal = results.rows[0].balance; - if (acctBal >= amount) { - // Perform the transfer. - async.waterfall([ - function (next) { - // Subtract amount from account 1. - client.query('UPDATE accounts SET balance = balance - $1 WHERE id = $2', [amount, from], next); - }, - function (updateResult, next) { - // Add amount to account 2. - client.query('UPDATE accounts SET balance = balance + $1 WHERE id = $2', [amount, to], next); - }, function (updateResult, next) { - // Fetch account balances after updates. - client.query('SELECT id, balance FROM accounts', function (err, selectResult) { - next(err, selectResult ? selectResult.rows : null); - }); - } - ], next); - } else { - next(new Error('insufficient funds')); - } - }); -} - -// Create a pool. -var pool = new pg.Pool(config); - -pool.connect(function (err, client, done) { - // Closes communication with the database and exits. - var finish = function () { - done(); - process.exit(); - }; - - if (err) { - console.error('could not connect to cockroachdb', err); - finish(); - } - - // Execute the transaction. - txnWrapper(client, - function (client, next) { - transferFunds(client, 1, 2, 100, next); - }, - function (err, results) { - if (err) { - console.error('error performing transaction', err); - finish(); - } - - console.log('Balances after transfer:'); - results.forEach(function (result) { - console.log(result); - }); - - finish(); - }); -}); diff --git a/src/current/_includes/v1.0/app/txn-sample.php b/src/current/_includes/v1.0/app/txn-sample.php deleted file mode 100644 index e060d311cc3..00000000000 --- a/src/current/_includes/v1.0/app/txn-sample.php +++ /dev/null @@ -1,71 +0,0 @@ -beginTransaction(); - // This savepoint allows us to retry our transaction. - $dbh->exec("SAVEPOINT cockroach_restart"); - } catch (Exception $e) { - throw $e; - } - - while (true) { - try { - $stmt = $dbh->prepare( - 'UPDATE accounts SET balance = balance + :deposit ' . - 'WHERE id = :account AND (:deposit > 0 OR balance + :deposit >= 0)'); - - // First, withdraw the money from the old account (if possible). - $stmt->bindValue(':account', $from, PDO::PARAM_INT); - $stmt->bindValue(':deposit', -$amount, PDO::PARAM_INT); - $stmt->execute(); - if ($stmt->rowCount() == 0) { - print "source account does not exist or is underfunded\r\n"; - return; - } - - // Next, deposit into the new account (if it exists). 
- $stmt->bindValue(':account', $to, PDO::PARAM_INT); - $stmt->bindValue(':deposit', $amount, PDO::PARAM_INT); - $stmt->execute(); - if ($stmt->rowCount() == 0) { - print "destination account does not exist\r\n"; - return; - } - - // Attempt to release the savepoint (which is really the commit). - $dbh->exec('RELEASE SAVEPOINT cockroach_restart'); - $dbh->commit(); - return; - } catch (PDOException $e) { - if ($e->getCode() != '40001') { - // Non-recoverable error. Rollback and bubble error up the chain. - $dbh->rollBack(); - throw $e; - } else { - // Cockroach transaction retry code. Rollback to the savepoint and - // restart. - $dbh->exec('ROLLBACK TO SAVEPOINT cockroach_restart'); - } - } - } -} - -try { - $dbh = new PDO('pgsql:host=localhost;port=26257;dbname=bank;sslmode=disable', - 'maxroach', null, array( - PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION, - PDO::ATTR_EMULATE_PREPARES => true, - )); - - transferMoney($dbh, 1, 2, 10); - - print "Account balances after transfer:\r\n"; - foreach ($dbh->query('SELECT id, balance FROM accounts') as $row) { - print $row['id'] . ': ' . $row['balance'] . "\r\n"; - } -} catch (Exception $e) { - print $e->getMessage() . "\r\n"; - exit(1); -} -?> diff --git a/src/current/_includes/v1.0/app/txn-sample.py b/src/current/_includes/v1.0/app/txn-sample.py deleted file mode 100644 index e85d78e1818..00000000000 --- a/src/current/_includes/v1.0/app/txn-sample.py +++ /dev/null @@ -1,68 +0,0 @@ -# Import the driver. -import psycopg2 -import psycopg2.errorcodes - -# Connect to the cluster. -conn = psycopg2.connect(database='bank', user='maxroach', host='localhost', port=26257) - - -def onestmt(conn, sql): - with conn.cursor() as cur: - cur.execute(sql) - - -# Wrapper for a transaction. -# This automatically re-calls "op" with the open transaction as an argument -# as long as the database server asks for the transaction to be retried. -def run_transaction(conn, op): - with conn: - onestmt(conn, "SAVEPOINT cockroach_restart") - while True: - try: - # Attempt the work. - op(conn) - - # If we reach this point, commit. - onestmt(conn, "RELEASE SAVEPOINT cockroach_restart") - break - - except psycopg2.OperationalError as e: - if e.pgcode != psycopg2.errorcodes.SERIALIZATION_FAILURE: - # A non-retryable error; report this up the call stack. - raise e - # Signal the database that we'll retry. - onestmt(conn, "ROLLBACK TO SAVEPOINT cockroach_restart") - - -# The transaction we want to run. -def transfer_funds(txn, frm, to, amount): - with txn.cursor() as cur: - - # Check the current balance. - cur.execute("SELECT balance FROM accounts WHERE id = " + str(frm)) - from_balance = cur.fetchone()[0] - if from_balance < amount: - raise "Insufficient funds" - - # Perform the transfer. - cur.execute("UPDATE accounts SET balance = balance - %s WHERE id = %s", - (amount, frm)) - cur.execute("UPDATE accounts SET balance = balance + %s WHERE id = %s", - (amount, to)) - - -# Execute the transaction. -run_transaction(conn, lambda conn: transfer_funds(conn, 1, 2, 100)) - - -with conn: - with conn.cursor() as cur: - # Check account balances. - cur.execute("SELECT id, balance FROM accounts") - rows = cur.fetchall() - print('Balances after transfer:') - for row in rows: - print([str(cell) for cell in row]) - -# Close communication with the database. 
-conn.close() diff --git a/src/current/_includes/v1.0/app/txn-sample.rb b/src/current/_includes/v1.0/app/txn-sample.rb deleted file mode 100644 index 8f84d975353..00000000000 --- a/src/current/_includes/v1.0/app/txn-sample.rb +++ /dev/null @@ -1,43 +0,0 @@ -# Import the driver. -require 'pg' - -# Wrapper for a transaction. -# This automatically re-calls "op" with the open transaction as an argument -# as long as the database server asks for the transaction to be retried. -def run_transaction(conn) - conn.transaction do |txn| - txn.exec('SAVEPOINT cockroach_restart') - while - begin - # Attempt the work. - yield txn - - # If we reach this point, commit. - txn.exec('RELEASE SAVEPOINT cockroach_restart') - break - rescue PG::TRSerializationFailure - txn.exec('ROLLBACK TO SAVEPOINT cockroach_restart') - end - end - end -end - -def transfer_funds(txn, from, to, amount) - txn.exec_params('SELECT balance FROM accounts WHERE id = $1', [from]) do |res| - res.each do |row| - raise 'insufficient funds' if Integer(row['balance']) < amount - end - end - txn.exec_params('UPDATE accounts SET balance = balance - $1 WHERE id = $2', [amount, from]) - txn.exec_params('UPDATE accounts SET balance = balance + $1 WHERE id = $2', [amount, to]) -end - -# Connect to the "bank" database. -conn = PG.connect(user: 'maxroach', dbname: 'bank', host: 'localhost', port: 26257) - -run_transaction(conn) do |txn| - transfer_funds(txn, 1, 2, 100) -end - -# Close communication with the database. -conn.close() diff --git a/src/current/_includes/v1.0/app/txn-sample.rs b/src/current/_includes/v1.0/app/txn-sample.rs deleted file mode 100644 index e2282c56ea1..00000000000 --- a/src/current/_includes/v1.0/app/txn-sample.rs +++ /dev/null @@ -1,59 +0,0 @@ -extern crate postgres; - -use postgres::{Connection, TlsMode, Result}; -use postgres::transaction::Transaction; -use self::postgres::error::T_R_SERIALIZATION_FAILURE; - -/// Runs op inside a transaction and retries it as needed. -/// On non-retryable failures, the transaction is aborted and -/// rolled back; on success, the transaction is committed. -fn execute_txn(conn: &Connection, mut op: F) -> Result -where - F: FnMut(&Transaction) -> Result, -{ - let txn = conn.transaction()?; - loop { - let sp = txn.savepoint("cockroach_restart")?; - match op(&sp).and_then(|t| sp.commit().map(|_| t)) { - Err(ref err) if err.as_db() - .map(|e| e.code == T_R_SERIALIZATION_FAILURE) - .unwrap_or(false) => {}, - r => break r, - } - }.and_then(|t| txn.commit().map(|_| t)) -} - -fn transfer_funds(txn: &Transaction, from: i64, to: i64, amount: i64) -> Result<()> { - // Read the balance. - let from_balance: i64 = txn.query("SELECT balance FROM accounts WHERE id = $1", &[&from])? - .get(0) - .get(0); - - assert!(from_balance >= amount); - - // Perform the transfer. - txn.execute( - "UPDATE accounts SET balance = balance - $1 WHERE id = $2", - &[&amount, &from], - )?; - txn.execute( - "UPDATE accounts SET balance = balance + $1 WHERE id = $2", - &[&amount, &to], - )?; - Ok(()) -} - -fn main() { - let conn = Connection::connect("postgresql://maxroach@localhost:26257/bank", TlsMode::None) - .unwrap(); - - // Run a transfer in a transaction. - execute_txn(&conn, |txn| transfer_funds(txn, 1, 2, 100)).unwrap(); - - // Check account balances after the transaction. 
- for row in &conn.query("SELECT id, balance FROM accounts", &[]).unwrap() { - let id: i64 = row.get(0); - let balance: i64 = row.get(1); - println!("{} {}", id, balance); - } -} diff --git a/src/current/_includes/v1.0/app/util.clj b/src/current/_includes/v1.0/app/util.clj deleted file mode 100644 index d040affe794..00000000000 --- a/src/current/_includes/v1.0/app/util.clj +++ /dev/null @@ -1,38 +0,0 @@ -(ns test.util - (:require [clojure.java.jdbc :as j] - [clojure.walk :as walk])) - -(defn txn-restart-err? - "Takes an exception and returns true if it is a CockroachDB retry error." - [e] - (when-let [m (.getMessage e)] - (condp instance? e - java.sql.BatchUpdateException - (and (re-find #"getNextExc" m) - (txn-restart-err? (.getNextException e))) - - org.postgresql.util.PSQLException - (= (.getSQLState e) "40001") ; 40001 is the code returned by CockroachDB retry errors. - - false))) - -;; Wrapper for a transaction. -;; This automatically invokes the body again as long as the database server -;; asks the transaction to be retried. - -(defmacro with-txn-retry - "Wrap an evaluation within a CockroachDB retry block." - [[txn c] & body] - `(j/with-db-transaction [~txn ~c] - (loop [] - (j/execute! ~txn ["savepoint cockroach_restart"]) - (let [res# (try (let [r# (do ~@body)] - {:ok r#}) - (catch java.sql.SQLException e# - (if (txn-restart-err? e#) - {:retry true} - (throw e#))))] - (if (:retry res#) - (do (j/execute! ~txn ["rollback to savepoint cockroach_restart"]) - (recur)) - (:ok res#)))))) diff --git a/src/current/_includes/v1.0/faq/auto-generate-unique-ids.html b/src/current/_includes/v1.0/faq/auto-generate-unique-ids.html deleted file mode 100644 index f97b4efee2d..00000000000 --- a/src/current/_includes/v1.0/faq/auto-generate-unique-ids.html +++ /dev/null @@ -1,15 +0,0 @@ -To auto-generate unique row IDs, use the [`SERIAL`](serial.html) data type, which is an alias for [`INT`](int.html) with the `unique_rowid()` [function](functions-and-operators.html) as the [default value](default-value.html): - -~~~ sql -> CREATE TABLE test (id SERIAL PRIMARY KEY, name STRING); -~~~ - -On insert, the `unique_rowid()` function generates a default value from the timestamp and ID of the node executing the insert, a combination that is likely to be globally unique except in extreme cases where a very large number of IDs (100,000+) are generated per node per second. In such cases, you should use a [`BYTES`](bytes.html) column with the `uuid_v4()` function as the default value instead: - -~~~ sql -> CREATE TABLE test (id BYTES PRIMARY KEY DEFAULT uuid_v4(), name STRING); -~~~ - -Because `BYTES` values are 128-bit, much larger than `INT` values at 64-bit, there is virtually no chance of generating non-unique values. - -The distribution of IDs at the key-value level may also be a consideration. When using `BYTES` with `uuid_v4()` as the default value, consecutively generated IDs will be spread across different key-value ranges (and therefore likely across different nodes), whereas when using `INT` with `unique_rowid()` as the default value, consecutively generated IDs may end up in the same key-value range. diff --git a/src/current/_includes/v1.0/faq/simulate-key-value-store.html b/src/current/_includes/v1.0/faq/simulate-key-value-store.html deleted file mode 100644 index 4772fa5358c..00000000000 --- a/src/current/_includes/v1.0/faq/simulate-key-value-store.html +++ /dev/null @@ -1,13 +0,0 @@ -CockroachDB is a distributed SQL database built on a transactional and strongly-consistent key-value store. 
Although it is not possible to access the key-value store directly, you can mirror direct access using a "simple" table of two columns, with one set as the primary key: - -~~~ sql -> CREATE TABLE kv (k INT PRIMARY KEY, v BYTES); -~~~ - -When such a "simple" table has no indexes or foreign keys, [`INSERT`](insert.html)/[`UPSERT`](upsert.html)/[`UPDATE`](update.html)/[`DELETE`](delete.html) statements translate to key-value operations with minimal overhead (single digit percent slowdowns). For example, the following `UPSERT` to add or replace a row in the table would translate into a single key-value Put operation: - -~~~ sql -> UPSERT INTO kv VALUES (1, b'hello') -~~~ - -This SQL table approach also offers you a well-defined query language, a known transaction model, and the flexibility to add more columns to the table if the need arises. diff --git a/src/current/_includes/v1.0/faq/when-to-interleave-tables.html b/src/current/_includes/v1.0/faq/when-to-interleave-tables.html deleted file mode 100644 index a65196ad693..00000000000 --- a/src/current/_includes/v1.0/faq/when-to-interleave-tables.html +++ /dev/null @@ -1,5 +0,0 @@ -You're most likely to benefit from interleaved tables when: - - - Your tables form a [hierarchy](interleave-in-parent.html#interleaved-hierarchy) - - Queries maximize the [benefits of interleaving](interleave-in-parent.html#benefits) - - Queries do not suffer too greatly from interleaving's [tradeoffs](interleave-in-parent.html#tradeoffs) diff --git a/src/current/_includes/v1.0/misc/diagnostics-callout.html b/src/current/_includes/v1.0/misc/diagnostics-callout.html deleted file mode 100644 index a969a8cf152..00000000000 --- a/src/current/_includes/v1.0/misc/diagnostics-callout.html +++ /dev/null @@ -1 +0,0 @@ -{{site.data.alerts.callout_info}}By default, each node of a CockroachDB cluster periodically shares anonymous usage details with Cockroach Labs. For an explanation of the details that get shared and how to opt-out of reporting, see Diagnostics Reporting.{{site.data.alerts.end}} diff --git a/src/current/_includes/v1.0/misc/experimental-warning.md b/src/current/_includes/v1.0/misc/experimental-warning.md deleted file mode 100644 index 7ee5c2c1894..00000000000 --- a/src/current/_includes/v1.0/misc/experimental-warning.md +++ /dev/null @@ -1,3 +0,0 @@ -{{site.data.alerts.callout_danger}} -This is an experimental feature. The interface and output of this feature are subject to change. -{{site.data.alerts.end}} diff --git a/src/current/_includes/v1.0/misc/external-urls.md b/src/current/_includes/v1.0/misc/external-urls.md deleted file mode 100644 index c980df83c1b..00000000000 --- a/src/current/_includes/v1.0/misc/external-urls.md +++ /dev/null @@ -1,19 +0,0 @@ -~~~ -[scheme]://[host]/[path]?[parameters] -~~~ - -| Location | scheme | host | parameters | -|----------|--------|------|------------| -| Amazon S3 | `s3` | Bucket name | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` | -| Azure | `azure` | Container name | `AZURE_ACCOUNT_KEY`, `AZURE_ACCOUNT_NAME` | -| Google Cloud [1](#considerations) | `gs` | Bucket name | N/A | -| HTTP | `http` | Remote host | N/A | -| NFS/Local [2](#considerations) | `nodelocal` | File system location | N/A | - -#### Considerations - -- 1 GCS connections use Google's [default authentication strategy](https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application). - -- 2 Because CockroachDB is a distributed system, you cannot meaningfully store backups "locally" on nodes. 
The entire backup file must be stored in a single location, so attempts to store backups locally must point to an NFS drive to be useful. - -- The location parameters often contain special characters that need to be URI-encoded. Use Javascript's [encodeURIComponent](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent) function or Go language's [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape) function to URI-encode the parameters. Other languages provide similar functions to URI-encode special characters. diff --git a/src/current/_includes/v1.0/misc/logging-flags.md b/src/current/_includes/v1.0/misc/logging-flags.md deleted file mode 100644 index 756d5f920cf..00000000000 --- a/src/current/_includes/v1.0/misc/logging-flags.md +++ /dev/null @@ -1,8 +0,0 @@ -Flag | Description ------|------------ -`--log-dir` | Enable logging to files and write logs to the specified directory.

Setting `--log-dir` to a blank directory (`--log-dir=`) disables logging to files. Do not use `--log-dir=""`; this creates a new directory named `""` and stores log files in that directory.
-`--log-dir-max-size` | After the log directory reaches the specified size, delete the oldest log file. The flag's argument takes standard file sizes, such as `--log-dir-max-size=1GiB`.<br><br>**Default**: 100MiB
-`--log-file-max-size` | After logs reach the specified size, begin writing logs to a new file. The flag's argument takes standard file sizes, such as `--log-file-max-size=2MiB`.<br><br>**Default**: 10MiB
-`--log-file-verbosity` | Only write messages to log files if they are at or above the specified [severity level](debug-and-error-logs.html#severity-levels), such as `--log-file-verbosity=WARNING`. **Requires** logging to files.<br><br>**Default**: `INFO`
-`--logtostderr` | Enable logging to `stderr` for messages at or above the specified [severity level](debug-and-error-logs.html#severity-levels), such as `--logtostderr=ERROR`.<br><br>If you use this flag without specifying the severity level (e.g., `cockroach start --logtostderr`), it prints messages of *all* severities to `stderr`.<br><br>Setting `--logtostderr=NONE` disables logging to `stderr`.
-`--no-color` | Do not colorize `stderr`. Possible values: `true` or `false`.<br><br>When set to `false`, messages logged to `stderr` are colorized based on [severity level](debug-and-error-logs.html#severity-levels).<br><br>
**Default:** `false` diff --git a/src/current/_includes/v1.0/misc/prometheus-callout.html b/src/current/_includes/v1.0/misc/prometheus-callout.html deleted file mode 100644 index 989279a7465..00000000000 --- a/src/current/_includes/v1.0/misc/prometheus-callout.html +++ /dev/null @@ -1 +0,0 @@ -{{site.data.alerts.callout_success}}You can also use Prometheus and other third-party, open source tools to monitor and visualize cluster metrics and send notifications based on specified rules. For more details, see Monitor CockroachDB with Prometheus.{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v1.0/misc/remove-user-callout.html b/src/current/_includes/v1.0/misc/remove-user-callout.html deleted file mode 100644 index 086d27509fc..00000000000 --- a/src/current/_includes/v1.0/misc/remove-user-callout.html +++ /dev/null @@ -1 +0,0 @@ -Removing a user does not remove that user's privileges. Therefore, to prevent a future user with an identical username from inheriting an old user's privileges, it's important to revoke a user's privileges before or after removing the user. diff --git a/src/current/_includes/v1.0/sql/diagrams/add_column.html b/src/current/_includes/v1.0/sql/diagrams/add_column.html deleted file mode 100644 index a2c1424aee8..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/add_column.html +++ /dev/null @@ -1,58 +0,0 @@ -
-[railroad diagram, SVG omitted: ALTER TABLE IF EXISTS table_name ADD COLUMN IF NOT EXISTS name typename col_qualification ,]
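As a quick illustration of the ADD COLUMN form summarized above (the `customers` table and `loyalty_points` column are hypothetical, not taken from the docs):

~~~ sql
> ALTER TABLE IF EXISTS customers ADD COLUMN IF NOT EXISTS loyalty_points INT DEFAULT 0;
~~~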
diff --git a/src/current/_includes/v1.0/sql/diagrams/add_constraint.html b/src/current/_includes/v1.0/sql/diagrams/add_constraint.html deleted file mode 100644 index 300466bbb65..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/add_constraint.html +++ /dev/null @@ -1,41 +0,0 @@ -
-[railroad diagram, SVG omitted: ALTER TABLE IF EXISTS table_name ADD CONSTRAINT name constraint_elem]
diff --git a/src/current/_includes/v1.0/sql/diagrams/alter_column.html b/src/current/_includes/v1.0/sql/diagrams/alter_column.html deleted file mode 100644 index 09b48ee7a83..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/alter_column.html +++ /dev/null @@ -1,59 +0,0 @@ -
-[railroad diagram, SVG omitted: ALTER TABLE IF EXISTS table_name ALTER COLUMN name SET DEFAULT a_expr DROP DEFAULT NOT NULL]
diff --git a/src/current/_includes/v1.0/sql/diagrams/alter_view.html b/src/current/_includes/v1.0/sql/diagrams/alter_view.html deleted file mode 100644 index a6f6c8aa3b0..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/alter_view.html +++ /dev/null @@ -1,36 +0,0 @@ -
-[railroad diagram, SVG omitted: ALTER VIEW IF EXISTS view_name RENAME TO name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/backup.html b/src/current/_includes/v1.0/sql/diagrams/backup.html deleted file mode 100644 index 83039891de8..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/backup.html +++ /dev/null @@ -1,65 +0,0 @@ -
-[railroad diagram, SVG omitted: BACKUP TABLE table_pattern , DATABASE name , TO string_or_placeholder AS OF SYSTEM TIME timestamp INCREMENTAL FROM full_backup_location , incremental_backup_location]
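An illustrative statement in the shape this diagram describes (the database name and storage URL below are hypothetical placeholders; the accepted URL schemes are the ones listed in the external-urls include earlier in this patch):

~~~ sql
> BACKUP DATABASE bank TO 'gs://acme-backups/bank-2017-05-01';
~~~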
diff --git a/src/current/_includes/v1.0/sql/diagrams/begin_transaction.html b/src/current/_includes/v1.0/sql/diagrams/begin_transaction.html deleted file mode 100644 index de2b51829bd..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/begin_transaction.html +++ /dev/null @@ -1,62 +0,0 @@ -
-[railroad diagram, SVG omitted: BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT SERIALIZABLE , PRIORITY LOW NORMAL HIGH PRIORITY LOW NORMAL HIGH , ISOLATION LEVEL SNAPSHOT SERIALIZABLE]
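A statement that exercises both option clauses shown above (purely illustrative):

~~~ sql
> BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT, PRIORITY HIGH;
~~~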
diff --git a/src/current/_includes/v1.0/sql/diagrams/check_column_level.html b/src/current/_includes/v1.0/sql/diagrams/check_column_level.html deleted file mode 100644 index 37e81b1b44b..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/check_column_level.html +++ /dev/null @@ -1,70 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE TABLE table_name ( column_name column_type CHECK ( check_expr ) column_constraints , column_def table_constraints ) )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/check_table_level.html b/src/current/_includes/v1.0/sql/diagrams/check_table_level.html deleted file mode 100644 index 95cc61bb012..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/check_table_level.html +++ /dev/null @@ -1,60 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE TABLE table_name ( column_def , CONSTRAINT name CHECK ( check_expr ) table_constraints )]
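For example, a table-level CHECK constraint written against this pattern (the table, column, and constraint names are hypothetical):

~~~ sql
> CREATE TABLE accounts (
    id INT PRIMARY KEY,
    balance INT,
    CONSTRAINT balance_nonnegative CHECK (balance >= 0)
);
~~~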
diff --git a/src/current/_includes/v1.0/sql/diagrams/col_qual_list.html b/src/current/_includes/v1.0/sql/diagrams/col_qual_list.html deleted file mode 100644 index 630d74cee22..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/col_qual_list.html +++ /dev/null @@ -1,110 +0,0 @@ -
-[railroad diagram, SVG omitted: CONSTRAINT name NOT NULL UNIQUE PRIMARY KEY CHECK ( a_expr ) DEFAULT b_expr REFERENCES qualified_name opt_name_parens COLLATE any_name FAMILY name CREATE FAMILY opt_name IF NOT EXISTS FAMILY name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/column_def.html b/src/current/_includes/v1.0/sql/diagrams/column_def.html deleted file mode 100644 index cc1452b5e73..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/column_def.html +++ /dev/null @@ -1,23 +0,0 @@ - diff --git a/src/current/_includes/v1.0/sql/diagrams/commit_transaction.html b/src/current/_includes/v1.0/sql/diagrams/commit_transaction.html deleted file mode 100644 index 02051f356d0..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/commit_transaction.html +++ /dev/null @@ -1,17 +0,0 @@ -
-[railroad diagram, SVG omitted: COMMIT END TRANSACTION]
diff --git a/src/current/_includes/v1.0/sql/diagrams/create_database.html b/src/current/_includes/v1.0/sql/diagrams/create_database.html deleted file mode 100644 index 82d08d46ffb..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/create_database.html +++ /dev/null @@ -1,61 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE DATABASE IF NOT EXISTS name opt_with opt_template_clause ENCODING opt_equal non_reserved_word_or_sconst opt_lc_collate_clause opt_lc_ctype_clause]
diff --git a/src/current/_includes/v1.0/sql/diagrams/create_index.html b/src/current/_includes/v1.0/sql/diagrams/create_index.html deleted file mode 100644 index 0882b112c53..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/create_index.html +++ /dev/null @@ -1,84 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE UNIQUE INDEX IF NOT EXISTS index_name ON table_name ( column_name ASC DESC , ) COVERING STORING ( column_name , ) opt_interleave]
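One way to instantiate this form, including the optional STORING clause (all identifiers are hypothetical):

~~~ sql
> CREATE INDEX users_name_idx ON users (last_name, first_name) STORING (email);
~~~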
diff --git a/src/current/_includes/v1.0/sql/diagrams/create_table.html b/src/current/_includes/v1.0/sql/diagrams/create_table.html deleted file mode 100644 index ae0a9c7b7f6..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/create_table.html +++ /dev/null @@ -1,62 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE TABLE IF NOT EXISTS any_name ( column_def index_def family_def table_constraint , ) opt_interleave]
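A compact example combining a column definition, an index definition, and a table constraint, as the diagram allows (identifiers are hypothetical):

~~~ sql
> CREATE TABLE IF NOT EXISTS orders (
    id INT PRIMARY KEY,
    customer_id INT,
    total DECIMAL,
    INDEX (customer_id),
    CONSTRAINT total_nonnegative CHECK (total >= 0)
);
~~~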
diff --git a/src/current/_includes/v1.0/sql/diagrams/create_table_as.html b/src/current/_includes/v1.0/sql/diagrams/create_table_as.html deleted file mode 100644 index 2b70788adc9..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/create_table_as.html +++ /dev/null @@ -1,50 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE TABLE IF NOT EXISTS any_name ( name , ) AS select_stmt]
diff --git a/src/current/_includes/v1.0/sql/diagrams/create_user.html b/src/current/_includes/v1.0/sql/diagrams/create_user.html deleted file mode 100644 index 5d7a7bcfe8a..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/create_user.html +++ /dev/null @@ -1,30 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE USER name WITH PASSWORD password]
diff --git a/src/current/_includes/v1.0/sql/diagrams/create_view.html b/src/current/_includes/v1.0/sql/diagrams/create_view.html deleted file mode 100644 index 22637afe595..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/create_view.html +++ /dev/null @@ -1,38 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE VIEW view_name ( column_list ) AS select_stmt]
diff --git a/src/current/_includes/v1.0/sql/diagrams/default_value_column_level.html b/src/current/_includes/v1.0/sql/diagrams/default_value_column_level.html deleted file mode 100644 index 242d04c62b9..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/default_value_column_level.html +++ /dev/null @@ -1,64 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE TABLE table_name ( column_name column_type DEFAULT default_value column_constraints , column_def table_constraints ) )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/delete.html b/src/current/_includes/v1.0/sql/diagrams/delete.html deleted file mode 100644 index 67c62fe2bbc..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/delete.html +++ /dev/null @@ -1,63 +0,0 @@ -
-[railroad diagram, SVG omitted: DELETE FROM relation_expr AS name WHERE a_expr RETURNING a_expr AS unrestricted_name identifier * , NOTHING]
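For instance, a delete that uses the RETURNING clause shown above (identifiers are hypothetical):

~~~ sql
> DELETE FROM accounts WHERE balance = 0 RETURNING id;
~~~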
diff --git a/src/current/_includes/v1.0/sql/diagrams/drop_column.html b/src/current/_includes/v1.0/sql/diagrams/drop_column.html deleted file mode 100644 index 02a2da98a00..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/drop_column.html +++ /dev/null @@ -1,48 +0,0 @@ -
-[railroad diagram, SVG omitted: ALTER TABLE IF EXISTS table_name DROP COLUMN IF EXISTS name CASCADE RESTRICT]
diff --git a/src/current/_includes/v1.0/sql/diagrams/drop_constraint.html b/src/current/_includes/v1.0/sql/diagrams/drop_constraint.html deleted file mode 100644 index be8e9b1bba8..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/drop_constraint.html +++ /dev/null @@ -1,42 +0,0 @@ -
-[railroad diagram, SVG omitted: ALTER TABLE IF EXISTS table_name DROP CONSTRAINT IF EXISTS name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/drop_database.html b/src/current/_includes/v1.0/sql/diagrams/drop_database.html deleted file mode 100644 index 1abc5cc2815..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/drop_database.html +++ /dev/null @@ -1,25 +0,0 @@ -
-[railroad diagram, SVG omitted: DROP DATABASE IF EXISTS name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/drop_index.html b/src/current/_includes/v1.0/sql/diagrams/drop_index.html deleted file mode 100644 index 1e9e18d9842..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/drop_index.html +++ /dev/null @@ -1,42 +0,0 @@ -
-[railroad diagram, SVG omitted: DROP INDEX IF EXISTS table_name @ index_name , CASCADE RESTRICT]
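The `table@index` form this diagram describes looks like the following (identifiers are hypothetical):

~~~ sql
> DROP INDEX IF EXISTS users@users_name_idx;
~~~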
diff --git a/src/current/_includes/v1.0/sql/diagrams/drop_table.html b/src/current/_includes/v1.0/sql/diagrams/drop_table.html deleted file mode 100644 index 55be9f639bd..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/drop_table.html +++ /dev/null @@ -1,34 +0,0 @@ -
-[railroad diagram, SVG omitted: DROP TABLE IF EXISTS table_name , CASCADE RESTRICT]
diff --git a/src/current/_includes/v1.0/sql/diagrams/drop_view.html b/src/current/_includes/v1.0/sql/diagrams/drop_view.html deleted file mode 100644 index 10f5e36ab5d..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/drop_view.html +++ /dev/null @@ -1,34 +0,0 @@ -
-[railroad diagram, SVG omitted: DROP VIEW IF EXISTS view_name , CASCADE RESTRICT]
diff --git a/src/current/_includes/v1.0/sql/diagrams/explain.html b/src/current/_includes/v1.0/sql/diagrams/explain.html deleted file mode 100644 index 58e15942543..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/explain.html +++ /dev/null @@ -1,40 +0,0 @@ -
-[railroad diagram, SVG omitted: EXPLAIN ( EXPRS METADATA QUALIFY VERBOSE TYPES , ) explainable_stmt]
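A minimal use of the parenthesized option list shown here (the query itself is illustrative):

~~~ sql
> EXPLAIN (VERBOSE) SELECT id, balance FROM accounts WHERE balance > 100;
~~~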
diff --git a/src/current/_includes/v1.0/sql/diagrams/family_def.html b/src/current/_includes/v1.0/sql/diagrams/family_def.html deleted file mode 100644 index fa031f7c0f7..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/family_def.html +++ /dev/null @@ -1,30 +0,0 @@ -
-[railroad diagram, SVG omitted: FAMILY name ( name , )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/foreign_key_column_level.html b/src/current/_includes/v1.0/sql/diagrams/foreign_key_column_level.html deleted file mode 100644 index a9b0475c6c4..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/foreign_key_column_level.html +++ /dev/null @@ -1,75 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE TABLE table_name ( column_name column_type REFERENCES parent_table ( ref_column_name ) column_constraints , column_def table_constraints ) )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/foreign_key_table_level.html b/src/current/_includes/v1.0/sql/diagrams/foreign_key_table_level.html deleted file mode 100644 index 39eec034c18..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/foreign_key_table_level.html +++ /dev/null @@ -1,85 +0,0 @@ -
-[railroad diagram, SVG omitted: CREATE TABLE table_name ( column_def , CONSTRAINT name FOREIGN KEY ( fk_column_name , ) REFERENCES parent_table ( ref_column_name , ) table_constraints )]
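A table-level foreign key spelled out per this pattern (identifiers are hypothetical; an explicit index on the referencing column is included as well, which v1.0-era CockroachDB generally requires for foreign keys):

~~~ sql
> CREATE TABLE orders (
    id INT PRIMARY KEY,
    customer_id INT,
    INDEX (customer_id),
    CONSTRAINT fk_customer FOREIGN KEY (customer_id) REFERENCES customers (id)
);
~~~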
diff --git a/src/current/_includes/v1.0/sql/diagrams/grammar.html b/src/current/_includes/v1.0/sql/diagrams/grammar.html deleted file mode 100644 index ce76864abf2..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/grammar.html +++ /dev/null @@ -1,9043 +0,0 @@ -
-[railroad diagrams for the full SQL grammar, SVG omitted. Productions diagrammed in this portion of the file: stmt_block, stmt_list, stmt, alter_table_stmt, backup_stmt, copy_from_stmt, create_stmt, delete_stmt, drop_stmt, explain_stmt, help_stmt, prepare_stmt, execute_stmt, deallocate_stmt, grant_stmt, insert_stmt, rename_stmt, revoke_stmt, savepoint_stmt, select_stmt, set_stmt, show_stmt, split_stmt, transaction_stmt, release_stmt, reset_stmt, truncate_stmt, update_stmt, relation_expr, alter_table_cmds, targets, string_or_placeholder, opt_as_of_clause, opt_incremental, opt_with_options, string_or_placeholder_list, qualified_name, qualified_name_list, create_database_stmt, create_index_stmt, create_table_stmt, create_table_as_stmt, create_user_stmt, create_view_stmt, relation_expr_opt_alias, where_clause, returning_clause, name, table_name_with_index_list, opt_drop_behavior, table_name_list, explainable_stmt, explain_option_list, unrestricted_name, prep_type_clause, preparable_stmt, execute_param_clause, privileges, grantee_list, insert_target, insert_rest, on_conflict, table_name_with_index, opt_column, savepoint_name, select_no_parens, select_with_parens, set_rest, transaction_iso_level, generic_set, any_name, var_name, on_privilege_target_clause, for_grantee_clause, opt_transaction, opt_transaction_mode_list, opt_to_savepoint, opt_table, relation_expr_list, set_clause_list, alter_table_cmd, table_pattern_list, name_list, non_reserved_word_or_sconst, a_expr_const, kv_option_list, qname_indirection, opt_with, opt_template_clause, opt_encoding_clause, opt_lc_collate_clause, opt_lc_ctype_clause, opt_unique, opt_name, index_params, opt_storing, opt_interleave, opt_table_elem_list, opt_column_list, opt_password, a_expr, target_list, unreserved_keyword.]
-

referenced by: -

-


col_name_keyword:

-
- - - - - - ANNOTATE_TYPE - - - BETWEEN - - - BIGINT - - - BIGSERIAL - - - BIT - - - BOOL - - - BOOLEAN - - - BYTEA - - - BYTES - - - CHAR - - - CHARACTER - - - CHARACTERISTICS - - - COALESCE - - - DATE - - - DEC - - - DECIMAL - - - EXISTS - - - EXTRACT - - - EXTRACT_DURATION - - - FLOAT - - - GREATEST - - - GROUPING - - - IF - - - IFNULL - - - INT - - - INT8 - - - INT64 - - - INTEGER - - - INTERVAL - - - LEAST - - - NAME - - - NULLIF - - - NUMERIC - - - OUT - - - OVERLAY - - - POSITION - - - PRECISION - - - REAL - - - ROW - - - SERIAL - - - SMALLINT - - - SMALLSERIAL - - - STRING - - - SUBSTRING - - - TIME - - - TIMESTAMP - - - TIMESTAMPTZ - - - TREAT - - - TRIM - - - VALUES - - - VARCHAR - - - -
-

referenced by: -

-


explain_option_name:

- -

referenced by: -

-


type_func_name_keyword:

-
- - - - - - COLLATION - - - CROSS - - - FULL - - - INNER - - - ILIKE - - - IS - - - JOIN - - - LEFT - - - LIKE - - - NATURAL - - - OUTER - - - OVERLAPS - - - RIGHT - - - SIMILAR - - - -
-

referenced by: -

-


reserved_keyword:

-
- - - - - - ALL - - - ANALYSE - - - ANALYZE - - - AND - - - ANY - - - ARRAY - - - AS - - - ASC - - - ASYMMETRIC - - - BOTH - - - CASE - - - CAST - - - CHECK - - - COLLATE - - - COLUMN - - - CONSTRAINT - - - CREATE - - - CURRENT_CATALOG - - - CURRENT_DATE - - - CURRENT_ROLE - - - CURRENT_TIME - - - CURRENT_TIMESTAMP - - - CURRENT_USER - - - DEFAULT - - - DEFERRABLE - - - DESC - - - DISTINCT - - - DO - - - ELSE - - - END - - - EXCEPT - - - FALSE - - - FAMILY - - - FETCH - - - FOR - - - FOREIGN - - - FROM - - - GRANT - - - GROUP - - - HAVING - - - IN - - - INDEX - - - INITIALLY - - - INTERSECT - - - INTO - - - LATERAL - - - LEADING - - - LIMIT - - - LOCALTIME - - - LOCALTIMESTAMP - - - NOT - - - NOTHING - - - NULL - - - OFFSET - - - ON - - - ONLY - - - OR - - - ORDER - - - PLACING - - - PRIMARY - - - REFERENCES - - - RETURNING - - - SELECT - - - SESSION_USER - - - SOME - - - SYMMETRIC - - - TABLE - - - THEN - - - TO - - - TRAILING - - - TRUE - - - UNION - - - UNIQUE - - - USER - - - USING - - - VARIADIC - - - VIEW - - - WHEN - - - WHERE - - - WINDOW - - - WITH - - - -
-

referenced by: -

-


type_list:

-
- - - - - - - typename - - - - , - - - -
-

referenced by: -

-


expr_list:

-
- - - - - - - a_expr - - - - , - - - -
-

referenced by: -

-


privilege_list:

-
- - - - - - - privilege - - - - , - - - -
-

referenced by: -

-


opt_conf_expr:

-
- - - - - - ( - - - - name_list - - - - ) - - - - where_clause - - - - -
-

referenced by: -

-


simple_select:

- -

referenced by: -

-


select_clause:

- -

referenced by: -

-


sort_clause:

-
- - - - - - ORDER - - - BY - - - - sortby_list - - - - -
-

referenced by: -

-


opt_sort_clause:

-
- - - - - - - sort_clause - - - - -
-

referenced by: -

-


select_limit:

- -

referenced by: -

-


transaction_mode_list:

- -

referenced by: -

-


set_rest_more:

-
- - - - - - - generic_set - - - - -
-

referenced by: -

-


iso_level:

-
- - - - - - READ - - - UNCOMMITTED - - - COMMITTED - - - SNAPSHOT - - - REPEATABLE - - - READ - - - SERIALIZABLE - - - -
-

referenced by: -

-


var_list:

-
- - - - - - - var_value - - - - , - - - -
-

referenced by: -

-


transaction_user_priority:

-
- - - - - - PRIORITY - - - - user_priority - - - - -
-

referenced by: -

-


set_clause:

- -

referenced by: -

-


column_def:

- -

referenced by: -

-


alter_column_default:

-
- - - - - - SET - - - DEFAULT - - - - a_expr - - - - DROP - - - DEFAULT - - - -
-

referenced by: -

-


table_constraint:

-
- - - - - - CONSTRAINT - - - - name - - - - - constraint_elem - - - - -
-

referenced by: -

-


opt_validate_behavior:

-
- - - - - - NOT - - - VALID - - - -
-

referenced by: -

-


table_pattern:

- -

referenced by: -

-


non_reserved_word:

- -

referenced by: -

-


const_typename:

- -

referenced by: -

-


interval:

-
- - - - - - INTERVAL - - - SCONST - - - - opt_interval - - - - -
-

referenced by: -

-


kv_option:

-
- - - - - - SCONST - - - - opt_equal_value - - - - -
-

referenced by: -

-


name_indirection_elem:

- -

referenced by: -

-


opt_equal:

-
- - - - - - = - - - -
-

referenced by: -

-


index_elem:

- -

referenced by: -

-


storing:

-
- - - - - - COVERING - - - STORING - - - -
-

referenced by: -

-


table_elem_list:

-
- - - - - - - table_elem - - - - , - - - -
-

referenced by: -

-


c_expr:

- -

referenced by: -

-


cast_target:

- -

referenced by: -

-


typename:

-
- - - - - - - simple_typename - - - - [ - - - ] - - - -
-

referenced by: -

-


opt_asymmetric:

-
- - - - - - ASYMMETRIC - - - -
-

referenced by: -

-


b_expr:

-
- - - - - - - c_expr - - - - - b_expr - - - - TYPECAST - - - - cast_target - - - - TYPEANNOTATE - - - - typename - - - - + - - - - - - - * - - - / - - - FLOORDIV - - - % - - - ^ - - - # - - - & - - - | - - - < - - - > - - - = - - - CONCAT - - - LSHIFT - - - RSHIFT - - - LESS_EQUALS - - - GREATER_EQUALS - - - NOT_EQUALS - - - - b_expr - - - - IS - - - NOT - - - DISTINCT - - - FROM - - - - b_expr - - - - OF - - - ( - - - - type_list - - - - ) - - - + - - - - - - - ~ - - - - b_expr - - - - -
-

referenced by: -

-


in_expr:

-
- - - - - - - select_with_parens - - - - ( - - - - expr_list - - - - ) - - - -
-

referenced by: -

-


subquery_op:

-
- - - - - - - math_op - - - - NOT - - - LIKE - - - ILIKE - - - -
-

referenced by: -

-


sub_type:

-
- - - - - - ANY - - - SOME - - - ALL - - - -
-

referenced by: -

-


d_expr:

- -

referenced by: -

-


target_elem:

-
- - - - - - - a_expr - - - - AS - - - - unrestricted_name - - - - identifier - - - * - - - -
-

referenced by: -

-


privilege:

-
- - - - - - CREATE - - - DROP - - - GRANT - - - SELECT - - - INSERT - - - DELETE - - - UPDATE - - - -
-

referenced by: -

-


opt_all_clause:

-
- - - - - - ALL - - - -
-

referenced by: -

-


from_clause:

- -

referenced by: -

-


group_clause:

-
- - - - - - GROUP - - - BY - - - - expr_list - - - - -
-

referenced by: -

-


having_clause:

-
- - - - - - HAVING - - - - a_expr - - - - -
-

referenced by: -

-


window_clause:

-
- - - - - - WINDOW - - - - window_definition_list - - - - -
-

referenced by: -

-


all_or_distinct:

-
- - - - - - ALL - - - DISTINCT - - - -
-

referenced by: -

-


sortby_list:

-
- - - - - - - sortby - - - - , - - - -
-

referenced by: -

-


limit_clause:

-
- - - - - - LIMIT - - - - select_limit_value - - - - -
-

referenced by: -

-


offset_clause:

- -

referenced by: -

-


var_value:

- -

referenced by: -

-


user_priority:

-
- - - - - - LOW - - - NORMAL - - - HIGH - - - -
-

referenced by: -

-


single_set_clause:

- -

referenced by: -

-


multiple_set_clause:

- -

referenced by: -

-


constraint_elem:

-
- - - - - - CHECK - - - ( - - - - a_expr - - - - PRIMARY - - - KEY - - - ( - - - - index_params - - - - ) - - - UNIQUE - - - ( - - - - index_params - - - - ) - - - - opt_storing - - - - - opt_interleave - - - - FOREIGN - - - KEY - - - ( - - - - name_list - - - - ) - - - REFERENCES - - - - qualified_name - - - - - opt_column_list - - - - -
-

referenced by: -

-


name_indirection:

-
- - - - - - . - - - - unrestricted_name - - - - -
-

referenced by: -

-


glob_indirection:

-
- - - - - - . - - - * - - - -
-

referenced by: -

-


numeric:

-
- - - - - - INT - - - INT8 - - - INT64 - - - INTEGER - - - SMALLINT - - - BIGINT - - - REAL - - - FLOAT - - - - opt_float - - - - DOUBLE - - - PRECISION - - - DECIMAL - - - DEC - - - NUMERIC - - - - opt_numeric_modifiers - - - - BOOLEAN - - - BOOL - - - -
-

referenced by: -

-


const_bit:

- -

referenced by: -

-


const_character:

- -

referenced by: -

-


const_datetime:

-
- - - - - - DATE - - - TIMESTAMP - - - WITHOUT - - - WITH - - - TIME - - - ZONE - - - TIMESTAMPTZ - - - -
-

referenced by: -

-


opt_interval:

-
- - - - - - YEAR - - - TO - - - MONTH - - - MONTH - - - DAY - - - TO - - - HOUR - - - MINUTE - - - SECOND - - - HOUR - - - TO - - - MINUTE - - - SECOND - - - MINUTE - - - TO - - - SECOND - - - SECOND - - - -
-

referenced by: -

-


opt_equal_value:

-
- - - - - - = - - - SCONST - - - -
-

referenced by: -

-


opt_asc_desc:

-
- - - - - - ASC - - - DESC - - - -
-

referenced by: -

-


table_elem:

- -

referenced by: -

-


case_expr:

- -

referenced by: -

-


postgres_oid:

-
- - - - - - REGPROC - - - REGPROCEDURE - - - REGCLASS - - - REGTYPE - - - REGNAMESPACE - - - -
-

referenced by: -

-


simple_typename:

-
- - - - - - - numeric - - - - - bit - - - - - character - - - - - const_datetime - - - - INTERVAL - - - - opt_interval - - - - BLOB - - - BYTES - - - BYTEA - - - TEXT - - - NAME - - - SERIAL - - - SMALLSERIAL - - - BIGSERIAL - - - OID - - - INT2VECTOR - - - -
-

referenced by: -

-


math_op:

-
- - - - - - + - - - - - - - * - - - / - - - FLOORDIV - - - % - - - & - - - | - - - ^ - - - # - - - < - - - > - - - = - - - LESS_EQUALS - - - GREATER_EQUALS - - - NOT_EQUALS - - - -
-

referenced by: -

-


func_expr:

- -

referenced by: -

-


array_expr:

-
- - - - - - [ - - - - expr_list - - - - - array_expr_list - - - - ] - - - -
-

referenced by: -

-


explicit_row:

-
- - - - - - ROW - - - ( - - - - expr_list - - - - ) - - - -
-

referenced by: -

-


implicit_row:

-
- - - - - - ( - - - - expr_list - - - - , - - - - a_expr - - - - ) - - - -
-

referenced by: -

-


from_list:

-
- - - - - - - table_ref - - - - , - - - -
-

referenced by: -

-


window_definition_list:

-
- - - - - - - window_definition - - - - , - - - -
-

referenced by: -

-


ctext_row:

-
- - - - - - ( - - - - ctext_expr_list - - - - ) - - - -
-

referenced by: -

-


sortby:

- -

referenced by: -

-


select_limit_value:

-
- - - - - - - a_expr - - - - ALL - - - -
-

referenced by: -

-


row_or_rows:

-
- - - - - - ROW - - - ROWS - - - -
-

referenced by: -

-


opt_boolean_or_string:

-
- - - - - - TRUE - - - FALSE - - - ON - - - - non_reserved_word_or_sconst - - - - -
-

referenced by: -

-


numeric_only:

-
- - - - - - - - - - FCONST - - - - signed_iconst - - - - -
-

referenced by: -

-


ctext_expr:

-
- - - - - - - a_expr - - - - DEFAULT - - - -
-

referenced by: -

-


col_qualification:

-
- - - - - - CONSTRAINT - - - - name - - - - - col_qualification_elem - - - - COLLATE - - - - any_name - - - - FAMILY - - - - name - - - - CREATE - - - FAMILY - - - - opt_name - - - - IF - - - NOT - - - EXISTS - - - FAMILY - - - - name - - - - -
-

referenced by: -

-


opt_float:

-
- - - - - - ( - - - ICONST - - - ) - - - -
-

referenced by: -

-


opt_numeric_modifiers:

-
- - - - - - ( - - - ICONST - - - , - - - ICONST - - - ) - - - -
-

referenced by: -

-


bit_with_length:

-
- - - - - - BIT - - - - opt_varying - - - - ( - - - ICONST - - - ) - - - -
-

referenced by: -

-


bit_without_length:

-
- - - - - - BIT - - - - opt_varying - - - - -
-

referenced by: -

-


character_with_length:

-
- - - - - - - character_base - - - - ( - - - ICONST - - - ) - - - -
-

referenced by: -

-


character_without_length:

-
- - - - - - - character_base - - - - -
-

referenced by: -

-


index_def:

-
- - - - - - UNIQUE - - - INDEX - - - - opt_name - - - - ( - - - - index_params - - - - ) - - - - opt_storing - - - - - opt_interleave - - - - -
-

referenced by: -

-


family_def:

-
- - - - - - FAMILY - - - - opt_name - - - - ( - - - - name_list - - - - ) - - - -
-

referenced by: -

-


array_subscript:

- -

referenced by: -

-


case_arg:

-
- - - - - - - a_expr - - - - -
-

referenced by: -

-


case_default:

-
- - - - - - ELSE - - - - a_expr - - - - -
-

referenced by: -

-


bit:

- -

referenced by: -

-


character:

- -

referenced by: -

-


func_application:

-
- - - - - - - func_name - - - - ( - - - ALL - - - DISTINCT - - - - expr_list - - - - - opt_sort_clause - - - - * - - - ) - - - -
-

referenced by: -

-


filter_clause:

-
- - - - - - FILTER - - - ( - - - WHERE - - - - a_expr - - - - ) - - - -
-

referenced by: -

-


over_clause:

- -

referenced by: -

-


func_expr_common_subexpr:

-
- - - - - - CURRENT_DATE - - - CURRENT_TIMESTAMP - - - ( - - - ) - - - CAST - - - ( - - - - a_expr - - - - AS - - - - cast_target - - - - ANNOTATE_TYPE - - - ( - - - - a_expr - - - - , - - - - typename - - - - EXTRACT - - - EXTRACT_DURATION - - - ( - - - - extract_list - - - - OVERLAY - - - ( - - - - overlay_list - - - - POSITION - - - ( - - - - position_list - - - - SUBSTRING - - - ( - - - - substr_list - - - - IF - - - ( - - - - a_expr - - - - , - - - NULLIF - - - IFNULL - - - ( - - - - a_expr - - - - , - - - - a_expr - - - - COALESCE - - - GREATEST - - - LEAST - - - ( - - - - expr_list - - - - TRIM - - - ( - - - BOTH - - - LEADING - - - TRAILING - - - - trim_list - - - - ) - - - -
-

referenced by: -

-


array_expr_list:

-
- - - - - - - array_expr - - - - , - - - -
-

referenced by: -

-


table_ref:

- -

referenced by: -

-


window_definition:

- -

referenced by: -

-


ctext_expr_list:

-
- - - - - - - ctext_expr - - - - , - - - -
-

referenced by: -

-


signed_iconst:

-
- - - - - - + - - - - - - - ICONST - - - -
-

referenced by: -

-


col_qualification_elem:

-
- - - - - - NOT - - - NULL - - - UNIQUE - - - PRIMARY - - - KEY - - - CHECK - - - ( - - - - a_expr - - - - ) - - - DEFAULT - - - - b_expr - - - - REFERENCES - - - - qualified_name - - - - - opt_name_parens - - - - -
-

referenced by: -

-


opt_varying:

-
- - - - - - VARYING - - - -
-

referenced by: -

-


character_base:

-
- - - - - - CHARACTER - - - CHAR - - - - opt_varying - - - - VARCHAR - - - STRING - - - -
-

referenced by: -

-


opt_slice_bound:

-
- - - - - - - a_expr - - - - -
-

referenced by: -

-


when_clause:

-
- - - - - - WHEN - - - - a_expr - - - - THEN - - - - a_expr - - - - -
-

referenced by: -

-


func_name:

- -

referenced by: -

-


window_specification:

- -

referenced by: -

-


extract_list:

- -

referenced by: -

-


overlay_list:

- -

referenced by: -

-


position_list:

-
- - - - - - - b_expr - - - - IN - - - - b_expr - - - - -
-

referenced by: -

-


substr_list:

- -

referenced by: -

-


trim_list:

-
- - - - - - - a_expr - - - - FROM - - - - expr_list - - - - -
-

referenced by: -

-


opt_index_hints:

-
- - - - - - @ - - - - unrestricted_name - - - - [ - - - ICONST - - - ] - - - { - - - - index_hints_param_list - - - - } - - - -
-

referenced by: -

-


opt_ordinality:

-
- - - - - - WITH - - - ORDINALITY - - - -
-

referenced by: -

-


opt_alias_clause:

-
- - - - - - - alias_clause - - - - -
-

referenced by: -

-


joined_table:

- -

referenced by: -

-


alias_clause:

-
- - - - - - AS - - - - name - - - - ( - - - - name_list - - - - ) - - - -
-

referenced by: -

-


opt_name_parens:

-
- - - - - - ( - - - - name - - - - ) - - - -
-

referenced by: -

-


type_function_name:

- -

referenced by: -

-


opt_existing_window_name:

-
- - - - - - - name - - - - -
-

referenced by: -

-


opt_partition_clause:

-
- - - - - - PARTITION - - - BY - - - - expr_list - - - - -
-

referenced by: -

-


extract_arg:

-
- - - - - - identifier - - - YEAR - - - MONTH - - - DAY - - - HOUR - - - MINUTE - - - SECOND - - - -
-

referenced by: -

-


overlay_placing:

-
- - - - - - PLACING - - - - a_expr - - - - -
-

referenced by: -

-


substr_from:

-
- - - - - - FROM - - - - a_expr - - - - -
-

referenced by: -

-


substr_for:

-
- - - - - - FOR - - - - a_expr - - - - -
-

referenced by: -

-


index_hints_param_list:

-
- - - - - - - index_hints_param - - - - , - - - -
-

referenced by: -

-


join_type:

-
- - - - - - FULL - - - LEFT - - - RIGHT - - - - join_outer - - - - INNER - - - -
-

referenced by: -

-


join_qual:

-
- - - - - - USING - - - ( - - - - name_list - - - - ) - - - ON - - - - a_expr - - - - -
-

referenced by: -

-


index_hints_param:

-
- - - - - - FORCE_INDEX - - - = - - - - unrestricted_name - - - - NO_INDEX_JOIN - - - -
-

referenced by: -

-


join_outer:

-
- - - - - - OUTER - - - -
-

referenced by: -

-


generated by Railroad Diagram Generator

diff --git a/src/current/_includes/v1.0/sql/diagrams/grant.html b/src/current/_includes/v1.0/sql/diagrams/grant.html deleted file mode 100644 index a5c33fa42bb..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/grant.html +++ /dev/null @@ -1,74 +0,0 @@ -
- [railroad diagram: GRANT { ALL | CREATE | DROP | GRANT | SELECT | INSERT | DELETE | UPDATE } [, ...] ON { TABLE table_name [, ...] | DATABASE database_name [, ...] } TO user_name [, ...]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/index_def.html b/src/current/_includes/v1.0/sql/diagrams/index_def.html deleted file mode 100644 index 3dfed2ce23e..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/index_def.html +++ /dev/null @@ -1,55 +0,0 @@ -
- [railroad diagram: [UNIQUE] INDEX [name] ( index_elem [, ...] ) [{ COVERING | STORING } ( name_list )] [opt_interleave]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/insert.html b/src/current/_includes/v1.0/sql/diagrams/insert.html deleted file mode 100644 index e0b5596f36f..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/insert.html +++ /dev/null @@ -1,65 +0,0 @@ -
- [railroad diagram: INSERT INTO qualified_name [AS name] [( qualified_name_list )] { select_stmt | DEFAULT VALUES } [on_conflict] [RETURNING { target_list | NOTHING }]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/interleave.html b/src/current/_includes/v1.0/sql/diagrams/interleave.html deleted file mode 100644 index daebf6cbd21..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/interleave.html +++ /dev/null @@ -1,64 +0,0 @@ -
- [railroad diagram: CREATE TABLE [IF NOT EXISTS] table_name ( table_definition ) INTERLEAVE IN PARENT parent_table ( interleave_prefix )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/not_null_column_level.html b/src/current/_includes/v1.0/sql/diagrams/not_null_column_level.html deleted file mode 100644 index 4de57106fa5..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/not_null_column_level.html +++ /dev/null @@ -1,59 +0,0 @@ -
- [railroad diagram: NOT NULL at the column level — CREATE TABLE table_name ( column_name column_type NOT NULL [column_constraints] [, column_def | table_constraints] )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/opt_interleave.html b/src/current/_includes/v1.0/sql/diagrams/opt_interleave.html deleted file mode 100644 index cab32629993..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/opt_interleave.html +++ /dev/null @@ -1,33 +0,0 @@ -
- [railroad diagram: INTERLEAVE IN PARENT name ( interleave_prefix )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/primary_key_column_level.html b/src/current/_includes/v1.0/sql/diagrams/primary_key_column_level.html deleted file mode 100644 index 1fb48d46d65..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/primary_key_column_level.html +++ /dev/null @@ -1,59 +0,0 @@ -
- [railroad diagram: PRIMARY KEY at the column level — CREATE TABLE table_name ( column_name column_type PRIMARY KEY [column_constraints] [, column_def | table_constraints] )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/primary_key_table_level.html b/src/current/_includes/v1.0/sql/diagrams/primary_key_table_level.html deleted file mode 100644 index 3f39a4c1c1e..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/primary_key_table_level.html +++ /dev/null @@ -1,63 +0,0 @@ -
- [railroad diagram: PRIMARY KEY at the table level — CREATE TABLE table_name ( column_def [, ...], CONSTRAINT name PRIMARY KEY ( column_name [, ...] ) [, table_constraints] )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/release_savepoint.html b/src/current/_includes/v1.0/sql/diagrams/release_savepoint.html deleted file mode 100644 index 7b4726b5303..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/release_savepoint.html +++ /dev/null @@ -1,19 +0,0 @@ -
- [railroad diagram: RELEASE SAVEPOINT name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/rename_column.html b/src/current/_includes/v1.0/sql/diagrams/rename_column.html deleted file mode 100644 index 39d719de049..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/rename_column.html +++ /dev/null @@ -1,44 +0,0 @@ -
- [railroad diagram: ALTER TABLE [IF EXISTS] table_name RENAME COLUMN current_name TO name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/rename_database.html b/src/current/_includes/v1.0/sql/diagrams/rename_database.html deleted file mode 100644 index fff0f9c5ace..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/rename_database.html +++ /dev/null @@ -1,30 +0,0 @@ -
- [railroad diagram: ALTER DATABASE name RENAME TO name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/rename_index.html b/src/current/_includes/v1.0/sql/diagrams/rename_index.html deleted file mode 100644 index 8560b1b8d89..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/rename_index.html +++ /dev/null @@ -1,44 +0,0 @@ -
- [railroad diagram: ALTER INDEX [IF EXISTS] table_name @ index_name RENAME TO name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/rename_table.html b/src/current/_includes/v1.0/sql/diagrams/rename_table.html deleted file mode 100644 index 5a038f65515..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/rename_table.html +++ /dev/null @@ -1,36 +0,0 @@ -
- [railroad diagram: ALTER TABLE [IF EXISTS] current_name RENAME TO new_name]
\ No newline at end of file diff --git a/src/current/_includes/v1.0/sql/diagrams/restore.html b/src/current/_includes/v1.0/sql/diagrams/restore.html deleted file mode 100644 index 27550b325a9..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/restore.html +++ /dev/null @@ -1,52 +0,0 @@ -
- [railroad diagram: RESTORE TABLE table_pattern [, ...] FROM full_backup_location [incremental_backup_location [, ...]] [WITH OPTIONS ( kv_option_list )]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/revoke.html b/src/current/_includes/v1.0/sql/diagrams/revoke.html deleted file mode 100644 index 6082ac02caa..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/revoke.html +++ /dev/null @@ -1,74 +0,0 @@ -
- [railroad diagram: REVOKE { ALL | CREATE | DROP | GRANT | SELECT | INSERT | DELETE | UPDATE } [, ...] ON { TABLE table_name [, ...] | DATABASE database_name [, ...] } FROM user_name [, ...]]
\ No newline at end of file diff --git a/src/current/_includes/v1.0/sql/diagrams/rollback_transaction.html b/src/current/_includes/v1.0/sql/diagrams/rollback_transaction.html deleted file mode 100644 index 87cf046b8fb..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/rollback_transaction.html +++ /dev/null @@ -1,22 +0,0 @@ -
- [railroad diagram: ROLLBACK [TO SAVEPOINT cockroach_restart]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/savepoint.html b/src/current/_includes/v1.0/sql/diagrams/savepoint.html deleted file mode 100644 index c8adefcb078..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/savepoint.html +++ /dev/null @@ -1,19 +0,0 @@ -
- [railroad diagram: SAVEPOINT name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/select.html b/src/current/_includes/v1.0/sql/diagrams/select.html deleted file mode 100644 index aa9f9023eef..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/select.html +++ /dev/null @@ -1,120 +0,0 @@ -
- [railroad diagram: SELECT [DISTINCT] target_elem [AS col_label] [, ...] FROM table_ref [@ index_name] [, ...] [AS OF SYSTEM TIME timestamp] [WHERE a_expr] [GROUP BY expr_list] [HAVING a_expr] [{ UNION | INTERSECT | EXCEPT } [ALL] SELECT ...] [ORDER BY sortby_list] [LIMIT limit_val] [OFFSET offset_val]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/set_cluster_setting.html b/src/current/_includes/v1.0/sql/diagrams/set_cluster_setting.html deleted file mode 100644 index f016d5fe189..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/set_cluster_setting.html +++ /dev/null @@ -1,39 +0,0 @@ -
- [railroad diagram: SET CLUSTER SETTING var_name { TO | = } { var_value [, ...] | DEFAULT }]
\ No newline at end of file diff --git a/src/current/_includes/v1.0/sql/diagrams/set_transaction.html b/src/current/_includes/v1.0/sql/diagrams/set_transaction.html deleted file mode 100644 index 794ddf60191..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/set_transaction.html +++ /dev/null @@ -1,62 +0,0 @@ -
- [railroad diagram: SET TRANSACTION { ISOLATION LEVEL { SNAPSHOT | SERIALIZABLE } | PRIORITY { LOW | NORMAL | HIGH } } [, ...] (either clause order)]
diff --git a/src/current/_includes/v1.0/sql/diagrams/set_var.html b/src/current/_includes/v1.0/sql/diagrams/set_var.html deleted file mode 100644 index c469de25d73..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/set_var.html +++ /dev/null @@ -1,53 +0,0 @@ -
- [railroad diagram: SET [SESSION] { var_name { TO | = } { var_value [, ...] | DEFAULT } | TIME ZONE { var_value | DEFAULT | LOCAL } }]
\ No newline at end of file diff --git a/src/current/_includes/v1.0/sql/diagrams/show_cluster_setting.html b/src/current/_includes/v1.0/sql/diagrams/show_cluster_setting.html deleted file mode 100644 index ced1f574df9..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_cluster_setting.html +++ /dev/null @@ -1,31 +0,0 @@ -
- [railroad diagram: SHOW { CLUSTER SETTING any_name | ALL CLUSTER SETTINGS }]
\ No newline at end of file diff --git a/src/current/_includes/v1.0/sql/diagrams/show_columns.html b/src/current/_includes/v1.0/sql/diagrams/show_columns.html deleted file mode 100644 index cde37387894..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_columns.html +++ /dev/null @@ -1,22 +0,0 @@ -
- [railroad diagram: SHOW COLUMNS FROM table_name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_constraints.html b/src/current/_includes/v1.0/sql/diagrams/show_constraints.html deleted file mode 100644 index f198aa32272..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_constraints.html +++ /dev/null @@ -1,22 +0,0 @@ -
- [railroad diagram: SHOW CONSTRAINTS FROM table_name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_create_table.html b/src/current/_includes/v1.0/sql/diagrams/show_create_table.html deleted file mode 100644 index a93cf0f7955..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_create_table.html +++ /dev/null @@ -1,22 +0,0 @@ -
- [railroad diagram: SHOW CREATE TABLE table_name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_create_view.html b/src/current/_includes/v1.0/sql/diagrams/show_create_view.html deleted file mode 100644 index ce15714619f..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_create_view.html +++ /dev/null @@ -1,22 +0,0 @@ -
- [railroad diagram: SHOW CREATE VIEW view_name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_databases.html b/src/current/_includes/v1.0/sql/diagrams/show_databases.html deleted file mode 100644 index 1ac5fd3061f..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_databases.html +++ /dev/null @@ -1,14 +0,0 @@ -
- [railroad diagram: SHOW DATABASES]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_grants.html b/src/current/_includes/v1.0/sql/diagrams/show_grants.html deleted file mode 100644 index f80155a5d1c..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_grants.html +++ /dev/null @@ -1,50 +0,0 @@ -
- [railroad diagram: SHOW GRANTS [ON { TABLE table_name [, ...] | DATABASE database_name [, ...] }] [FOR user_name [, ...]]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_index.html b/src/current/_includes/v1.0/sql/diagrams/show_index.html deleted file mode 100644 index 1f568cd590d..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_index.html +++ /dev/null @@ -1,22 +0,0 @@ -
- [railroad diagram: SHOW INDEX FROM table_name]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_tables.html b/src/current/_includes/v1.0/sql/diagrams/show_tables.html deleted file mode 100644 index cb226c7274d..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_tables.html +++ /dev/null @@ -1,22 +0,0 @@ -
- [railroad diagram: SHOW TABLES [FROM name]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_users.html b/src/current/_includes/v1.0/sql/diagrams/show_users.html deleted file mode 100644 index e2842939976..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_users.html +++ /dev/null @@ -1,14 +0,0 @@ -
- [railroad diagram: SHOW USERS]
diff --git a/src/current/_includes/v1.0/sql/diagrams/show_var.html b/src/current/_includes/v1.0/sql/diagrams/show_var.html deleted file mode 100644 index 998bed5324a..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/show_var.html +++ /dev/null @@ -1,17 +0,0 @@ -
- [railroad diagram: SHOW { var_name | ALL }]
\ No newline at end of file diff --git a/src/current/_includes/v1.0/sql/diagrams/table.html b/src/current/_includes/v1.0/sql/diagrams/table.html deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/current/_includes/v1.0/sql/diagrams/table_constraint.html b/src/current/_includes/v1.0/sql/diagrams/table_constraint.html deleted file mode 100644 index 96ffad2076b..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/table_constraint.html +++ /dev/null @@ -1,110 +0,0 @@ -
- [railroad diagram: [CONSTRAINT name] { CHECK ( a_expr ) | PRIMARY KEY ( index_params ) | UNIQUE ( index_params ) [{ COVERING | STORING } ( name_list )] [opt_interleave] | FOREIGN KEY ( name_list ) REFERENCES qualified_name [opt_column_list] }]
diff --git a/src/current/_includes/v1.0/sql/diagrams/truncate.html b/src/current/_includes/v1.0/sql/diagrams/truncate.html deleted file mode 100644 index a397b2c1d04..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/truncate.html +++ /dev/null @@ -1,28 +0,0 @@ -
- [railroad diagram: TRUNCATE [TABLE] table_name [, ...] [CASCADE | RESTRICT]]
diff --git a/src/current/_includes/v1.0/sql/diagrams/unique_column_level.html b/src/current/_includes/v1.0/sql/diagrams/unique_column_level.html deleted file mode 100644 index d9e817a0b14..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/unique_column_level.html +++ /dev/null @@ -1,59 +0,0 @@ -
- [railroad diagram: UNIQUE at the column level — CREATE TABLE table_name ( column_name column_type UNIQUE [column_constraints] [, column_def | table_constraints] )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/unique_table_level.html b/src/current/_includes/v1.0/sql/diagrams/unique_table_level.html deleted file mode 100644 index 6d17dbe543c..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/unique_table_level.html +++ /dev/null @@ -1,63 +0,0 @@ -
- [railroad diagram: UNIQUE at the table level — CREATE TABLE table_name ( column_def [, ...], CONSTRAINT name UNIQUE ( column_name [, ...] ) [, table_constraints] )]
diff --git a/src/current/_includes/v1.0/sql/diagrams/update.html b/src/current/_includes/v1.0/sql/diagrams/update.html deleted file mode 100644 index 21c55a8a5bd..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/update.html +++ /dev/null @@ -1,101 +0,0 @@ -
- [railroad diagram: UPDATE table_name [AS name] SET { column_name = { a_expr | DEFAULT } | ( column_name_list ) = { ( { a_expr | DEFAULT } [, ...] ) | select_with_parens } } [, ...] [WHERE a_expr] [RETURNING { target_list | NOTHING }]]
\ No newline at end of file diff --git a/src/current/_includes/v1.0/sql/diagrams/upsert.html b/src/current/_includes/v1.0/sql/diagrams/upsert.html deleted file mode 100644 index 0b765e2e5b1..00000000000 --- a/src/current/_includes/v1.0/sql/diagrams/upsert.html +++ /dev/null @@ -1,60 +0,0 @@ -
- [railroad diagram: UPSERT INTO qualified_name [AS name] [( qualified_name_list )] { select_stmt | DEFAULT VALUES } [RETURNING { target_list | NOTHING }]]
diff --git a/src/current/_includes/v1.0/start-in-docker/mac-linux-steps.md b/src/current/_includes/v1.0/start-in-docker/mac-linux-steps.md deleted file mode 100644 index e8715c0dd48..00000000000 --- a/src/current/_includes/v1.0/start-in-docker/mac-linux-steps.md +++ /dev/null @@ -1,160 +0,0 @@ -## Before you begin - -If you have not already installed the official CockroachDB Docker image, go to [Install CockroachDB](install-cockroachdb.html) and follow the instructions under **Use Docker**. - -## Step 1. Create a bridge network - -Since you'll be running multiple Docker containers on a single host, with one CockroachDB node per container, you need to create what Docker refers to as a [bridge network](https://docs.docker.com/engine/userguide/networking/#/a-bridge-network). The bridge network will enable the containers to communicate as a single cluster while keeping them isolated from external networks. - -{% include copy-clipboard.html %} -~~~ shell -$ docker network create -d bridge roachnet -~~~ - -We've used `roachnet` as the network name here and in subsequent steps, but feel free to give your network any name you like. - -## Step 2. Start the first node - -{% include copy-clipboard.html %} -~~~ shell -$ docker run -d \ ---name=roach1 \ ---hostname=roach1 \ ---net=roachnet \ --p 26257:26257 -p 8080:8080 \ --v "${PWD}/cockroach-data/roach1:/cockroach/cockroach-data" \ -{{page.release_info.docker_image}}:{{page.release_info.version}} start --insecure -~~~ - -This command creates a container and starts the first CockroachDB node inside it. Let's look at each part: - -- `docker run`: The Docker command to start a new container. -- `-d`: This flag runs the container in the background so you can continue the next steps in the same shell. -- `--name`: The name for the container. This is optional, but a custom name makes it significantly easier to reference the container in other commands, for example, when opening a Bash session in the container or stopping the container. -- `--hostname`: The hostname for the container. You will use this to join other containers/nodes to the cluster. -- `--net`: The bridge network for the container to join. See step 1 for more details. -- `-p 26257:26257 -p 8080:8080`: These flags map the default port for inter-node and client-node communication (`26257`) and the default port for HTTP requests to the Admin UI (`8080`) from the container to the host. This enables inter-container communication and makes it possible to call up the Admin UI from a browser. -- `-v "${PWD}/cockroach-data/roach1:/cockroach/cockroach-data"`: This flag mounts a host directory as a data volume. This means that data and logs for this node will be stored in `${PWD}/cockroach-data/roach1` on the host and will persist after the container is stopped or deleted. For more details, see Docker's Bind Mounts topic. -- `{{page.release_info.docker_image}}:{{page.release_info.version}} start --insecure`: The CockroachDB command to [start a node](start-a-node.html) in the container in insecure mode. - -## Step 3. Add nodes to the cluster - -At this point, your cluster is live and operational. With just one node, you can already connect a SQL client and start building out your database. In real deployments, however, you'll always want 3 or more nodes to take advantage of CockroachDB's [automatic replication](demo-data-replication.html), [rebalancing](demo-automatic-rebalancing.html), and [fault tolerance](demo-fault-tolerance-and-recovery.html) capabilities. 
- -To simulate a real deployment, scale your cluster by adding two more nodes: - -{% include copy-clipboard.html %} -~~~ shell -$ docker run -d \ ---name=roach2 \ ---hostname=roach2 \ ---net=roachnet \ --v "${PWD}/cockroach-data/roach2:/cockroach/cockroach-data" \ -{{page.release_info.docker_image}}:{{page.release_info.version}} start --insecure --join=roach1 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ docker run -d \ ---name=roach3 \ ---hostname=roach3 \ ---net=roachnet \ --v "${PWD}/cockroach-data/roach3:/cockroach/cockroach-data" \ -{{page.release_info.docker_image}}:{{page.release_info.version}} start --insecure --join=roach1 -~~~ - -These commands add two more containers and start CockroachDB nodes inside them, joining them to the first node. There are only a few differences to note from step 2: - -- `-v`: This flag mounts a host directory as a data volume. Data and logs for these nodes will be stored in `${PWD}/cockroach-data/roach2` and `${PWD}/cockroach-data/roach3` on the host and will persist after the containers are stopped or deleted. -- `--join`: This flag joins the new nodes to the cluster, using the first container's `hostname`. Otherwise, all [`cockroach start`](start-a-node.html) defaults are accepted. Note that since each node is in a unique container, using identical default ports won’t cause conflicts. - -## Step 4. Test the cluster - -Now that you've scaled to 3 nodes, you can use any node as a SQL gateway to the cluster. To demonstrate this, use the `docker exec` command to start the [built-in SQL shell](use-the-built-in-sql-client.html) in the first container: - -{% include copy-clipboard.html %} -~~~ shell -$ docker exec -it roach1 ./cockroach sql --insecure -~~~ - -~~~ -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -Run some basic [CockroachDB SQL statements](learn-cockroachdb-sql.html): - -{% include copy-clipboard.html %} -~~~ sql -> CREATE DATABASE bank; -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> CREATE TABLE bank.accounts (id INT PRIMARY KEY, balance DECIMAL); -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> INSERT INTO bank.accounts VALUES (1, 1000.50); -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -Exit the SQL shell on node 1: - -{% include copy-clipboard.html %} -~~~ sql -> \q -~~~ - -Then start the SQL shell in the second container: - -{% include copy-clipboard.html %} -~~~ shell -$ docker exec -it roach2 ./cockroach sql --insecure -~~~ - -~~~ -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -Now run the same `SELECT` query: - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -As you can see, node 1 and node 2 behaved identically as SQL gateways. 
- -When you're done, exit the SQL shell on node 2: - -{% include copy-clipboard.html %} -~~~ sql -> \q -~~~ diff --git a/src/current/openssl_fix.rb b/src/current/openssl_fix.rb new file mode 100644 index 00000000000..285aa8c94c1 --- /dev/null +++ b/src/current/openssl_fix.rb @@ -0,0 +1,27 @@ +require "openssl" +require "net/http" + +# Monkey patch to completely disable SSL verification +module OpenSSL + module SSL + remove_const :VERIFY_PEER + VERIFY_PEER = VERIFY_NONE + end +end + +# Override Net::HTTP SSL context creation +class Net::HTTP + alias_method :original_use_ssl=, :use_ssl= + + def use_ssl=(flag) + self.original_use_ssl = flag + if flag + @ssl_context = OpenSSL::SSL::SSLContext.new + @ssl_context.verify_mode = OpenSSL::SSL::VERIFY_NONE + @ssl_context.verify_hostname = false + end + end +end + +# Set environment variable as fallback +ENV['SSL_VERIFY'] = 'false' \ No newline at end of file diff --git a/src/current/releases/v1.0.md b/src/current/releases/v1.0.md index 1f210f69c55..083d3b34c12 100644 --- a/src/current/releases/v1.0.md +++ b/src/current/releases/v1.0.md @@ -1,5 +1,5 @@ --- -title: What's New in v1.0 +title: What's New in v1.0 (Archived) toc: true toc_not_nested: true summary: Additions and changes in CockroachDB version v1.0 @@ -8,6 +8,11 @@ docs_area: releases keywords: gin, gin index, gin indexes, inverted index, inverted indexes, accelerated index, accelerated indexes --- +{{site.data.alerts.callout_info}} +CockroachDB v1.0 is no longer supported. For more details, refer to the +[Release Support Policy]({% link releases/release-support-policy.md %}). +{{site.data.alerts.end}} + {% assign rel = site.data.releases | where_exp: "rel", "rel.major_version == page.major_version" | sort: "release_date" | reverse %} {% assign vers = site.data.versions | where_exp: "vers", "vers.major_version == page.major_version" | first %} diff --git a/src/current/v1.0/404.md b/src/current/v1.0/404.md deleted file mode 100755 index 13a69ddde5c..00000000000 --- a/src/current/v1.0/404.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Page Not Found -description: "Page not found." -sitemap: false -search: exclude -related_pages: none -toc: false ---- - - -{%comment%} - - -{%endcomment%} \ No newline at end of file diff --git a/src/current/v1.0/add-column.md b/src/current/v1.0/add-column.md deleted file mode 100644 index e00a7bf5fe1..00000000000 --- a/src/current/v1.0/add-column.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: ADD COLUMN -summary: Use the ADD COLUMN statement to add columns to tables. -toc: true ---- - -The `ADD COLUMN` [statement](sql-statements.html) is part of `ALTER TABLE` and adds columns to tables. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/add_column.html %} - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the table. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table to which you want to add the column. | -| `name` | The name of the column you want to add. The column name must follow these [identifier rules](keywords-and-identifiers.html#identifiers) and must be unique within the table but can have the same name as indexes or constraints. | -| `typename` | The [data type](data-types.html) of the new column. | -| `col_qualification` | A list of column definitions, which may include [column-level constraints](constraints.html), [collation](collate.html), or [column family assignments](column-families.html).

Note that it is not possible to add a column with the [Foreign Key](foreign-key.html) constraint. As a workaround, you can add the column without the constraint, then use [`CREATE INDEX`](create-index.html) to index the column, and then use [`ADD CONSTRAINT`](add-constraint.html) to add the Foreign Key constraint to the column. | - -## Examples - -### Add a Single Column - -~~~ sql -> ALTER TABLE accounts ADD COLUMN names STRING; -~~~ - -~~~ sql -> SHOW COLUMNS FROM accounts; -~~~ - -~~~ -+-----------+-------------------+-------+---------+-----------+ -| Field | Type | Null | Default | Indices | -+-----------+-------------------+-------+---------+-----------+ -| id | INT | false | NULL | {primary} | -| balance | DECIMAL | true | NULL | {} | -| names | STRING | true | NULL | {} | -+-----------+-------------------+-------+---------+-----------+ -~~~ - -### Add Multiple Columns - -~~~ sql -> ALTER TABLE accounts ADD COLUMN location STRING, ADD COLUMN amount DECIMAL; -~~~ - -~~~ sql -> SHOW COLUMNS FROM accounts; -~~~ - -~~~ -+-----------+-------------------+-------+---------+-----------+ -| Field | Type | Null | Default | Indices | -+-----------+-------------------+-------+---------+-----------+ -| id | INT | false | NULL | {primary} | -| balance | DECIMAL | true | NULL | {} | -| names | STRING | true | NULL | {} | -| location | STRING | true | NULL | {} | -| amount | DECIMAL | true | NULL | {} | -+-----------+-------------------+-------+---------+-----------+ - -~~~ - -### Add a Non-Null Column with a Default Value - -~~~ sql -> ALTER TABLE accounts ADD COLUMN interest DECIMAL NOT NULL DEFAULT (DECIMAL '1.3'); -~~~ - -~~~ sql -> SHOW COLUMNS FROM accounts; -~~~ -~~~ -+-----------+-------------------+-------+---------------------------+-----------+ -| Field | Type | Null | Default | Indices | -+-----------+-------------------+-------+---------------------------+-----------+ -| id | INT | false | NULL | {primary} | -| balance | DECIMAL | true | NULL | {} | -| names | STRING | true | NULL | {} | -| location | STRING | true | NULL | {} | -| amount | DECIMAL | true | NULL | {} | -| interest | DECIMAL | false | ('1.3':::STRING::DECIMAL) | {} | -+-----------+-------------------+-------+---------------------------+-----------+ -~~~ - -### Add a Non-Null Column with Unique Values - -~~~ sql -> ALTER TABLE accounts ADD COLUMN cust_number DECIMAL UNIQUE NOT NULL; -~~~ - -### Add a Column with Collation - -~~~ sql -> ALTER TABLE accounts ADD COLUMN more_names STRING COLLATE en; -~~~ - -### Add a Column and Assign it to a Column Family - -#### Add a Column and Assign it to a New Column Family -~~~ sql -> ALTER TABLE accounts ADD COLUMN location1 STRING CREATE FAMILY new_family; -~~~ - -#### Add a Column and Assign it to an Existing Column Family -~~~ sql -> ALTER TABLE accounts ADD COLUMN location2 STRING FAMILY existing_family; -~~~ - -#### Add a Column and Create a New Column Family if Column Family Does Not Exist -~~~ sql -> ALTER TABLE accounts ADD COLUMN new_name STRING CREATE IF NOT EXISTS FAMILY f1; -~~~ - - -## See Also -- [`ALTER TABLE`](alter-table.html) -- [Column-level Constraints](constraints.html) -- [Collation](collate.html) -- [Column Families](column-families.html) diff --git a/src/current/v1.0/add-constraint.md b/src/current/v1.0/add-constraint.md deleted file mode 100644 index dd46a7e930e..00000000000 --- a/src/current/v1.0/add-constraint.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: ADD CONSTRAINT -summary: Use the ADD CONSTRAINT statement to add constraints to columns. 
-toc: true ---- - -The `ADD CONSTRAINT` [statement](sql-statements.html) is part of `ALTER TABLE` and can add the following [constraints](constraints.html) to columns: - -- [Check](check.html) -- [Foreign Keys](foreign-key.html) -- [Unique](unique.html) - -{{site.data.alerts.callout_info}} -The Primary Key and Not Null constraints can only be applied through CREATE TABLE. The Default constraint is managed through ALTER COLUMN.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/add_constraint.html %} - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the table. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table containing the column you want to constrain. | -| `name` | The name of the constraint, which must be unique to its table and follow these [identifier rules](keywords-and-identifiers.html#identifiers). | -| `constraint_elem` | The [Check](check.html), [Foreign Keys](foreign-key.html), [Unique](unique.html) constraint you want to add.

Adding/changing a Default constraint is done through [`ALTER COLUMN`](alter-column.html).

Adding/changing the table's Primary Key is not supported through `ALTER TABLE`; it can only be specified during [table creation](create-table.html#create-a-table-primary-key-defined). | - -## Examples - -### Add the Unique Constraint - -Adding the [Unique constraint](unique.html) requires that all of a column's values be distinct from one another (except for *NULL* values). - -``` sql -> ALTER TABLE orders ADD CONSTRAINT id_customer_unique UNIQUE (id, customer); -``` - -### Add the Check Constraint - -Adding the [Check constraint](check.html) requires that all of a column's values evaluate to `TRUE` for a Boolean expression. - -``` sql -> ALTER TABLE orders ADD CONSTRAINT total_0_check CHECK (total > 0); -``` - -### Add the Foreign Key Constraint - -Before you can add the [Foreign Key](foreign-key.html) constraint to columns, the columns must already be indexed. If they are not already indexed, use [`CREATE INDEX`](create-index.html) to index them and only then use the `ADD CONSTRAINT` statement to add the Foreign Key constraint to the columns. - -For example, let's say you have two simple tables, `orders` and `customers`: - -~~~ sql -> SHOW CREATE TABLE customers; -~~~ - -~~~ -+-----------+-------------------------------------------------+ -| Table | CreateTable | -+-----------+-------------------------------------------------+ -| customers | CREATE TABLE customers ( | -| | id INT NOT NULL, | -| | "name" STRING NOT NULL, | -| | address STRING NULL, | -| | CONSTRAINT "primary" PRIMARY KEY (id ASC), | -| | FAMILY "primary" (id, "name", address) | -| | ) | -+-----------+-------------------------------------------------+ -(1 row) -~~~ - -~~~ sql -> SHOW CREATE TABLE orders; -~~~ - -~~~ -+--------+-------------------------------------------------------------------------------------------------------------+ -| Table | CreateTable | -+--------+-------------------------------------------------------------------------------------------------------------+ -| orders | CREATE TABLE orders ( | -| | id INT NOT NULL, | -| | customer_id INT NULL, | -| | status STRING NOT NULL, | -| | CONSTRAINT "primary" PRIMARY KEY (id ASC), | -| | FAMILY "primary" (id, customer_id, status), | -| | CONSTRAINT check_status CHECK (status IN ('open':::STRING, 'complete':::STRING, 'cancelled':::STRING)) | -| | ) | -+--------+-------------------------------------------------------------------------------------------------------------+ -(1 row) -~~~ - -To ensure that each value in the `orders.customer_id` column matches a unique value in the `orders.id` column, you want to add the Foreign Key constraint to `orders.customer_id`. 
So you first create an index on `orders.customer_id`: - -~~~ sql -> CREATE INDEX ON orders (customer_id); -~~~ - -Then you add the Foreign Key constraint: - -~~~ sql -> ALTER TABLE orders ADD CONSTRAINT customer_fk FOREIGN KEY (customer_id) REFERENCES customers (id); -~~~ - -If you had tried to add the constraint before indexing the column, you would have received an error: - -~~~ -pq: foreign key requires an existing index on columns ("customer_id") -~~~ - -## See Also - -- [Constraints](constraints.html) -- [`ALTER COLUMN`](alter-column.html) -- [`CREATE TABLE`](create-table.html) -- [`ALTER TABLE`](alter-table.html) diff --git a/src/current/v1.0/alter-column.md b/src/current/v1.0/alter-column.md deleted file mode 100644 index f5d8c566303..00000000000 --- a/src/current/v1.0/alter-column.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: ALTER COLUMN -summary: Use the ALTER COLUMN statement to change a column's Default constraint. -toc: true ---- - -The `ALTER COLUMN` [statement](sql-statements.html) is part of `ALTER TABLE` and changes a column's [Default constraint](default-value.html) or drops the [Not Null constraint](not-null.html). - -{{site.data.alerts.callout_info}}To manage other constraints, see ADD CONSTRAINT and DROP CONSTRAINT{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/alter_column.html %} - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the table. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table with the column whose Default Value you want to modify. | -| `name` | The name of the column you want to modify. | -| `a_expr` | The new Default Value you want to use. | - -## Examples - -### Set or Change a Default Value - -Setting the [Default Value constraint](default-value.html) inserts the value when data's written to the table without explicitly defining the value for the column. If the column already has a Default Value set, you can use this statement to change it. - -The below example inserts the Boolean value `true` whenever you inserted data to the `subscriptions` table without defining a value for the `newsletter` column. - -``` sql -> ALTER TABLE subscriptions ALTER COLUMN newsletter SET DEFAULT true; -``` - -### Remove Default Constraint - -If the column has a defined [Default Value](default-value.html), you can remove the constraint, which means the column will no longer insert a value by default if one is not explicitly defined for the column. - -``` sql -> ALTER TABLE subscriptions ALTER COLUMN newsletter DROP DEFAULT; -``` - -### Remove Not Null Constraint - -If the column has the [Not Null constraint](not-null.html) applied to it, you can remove the constraint, which means the column becomes optional and can have *NULL* values written into it. - -``` sql -> ALTER TABLE subscriptions ALTER COLUMN newsletter DROP NOT NULL; -``` - -## See Also - -- [Constraints](constraints.html) -- [`ADD CONSTRAINT`](add-constraint.html) -- [`DROP CONSTRAINT`](drop-constraint.html) -- [`ALTER TABLE`](alter-table.html) diff --git a/src/current/v1.0/alter-table.md b/src/current/v1.0/alter-table.md deleted file mode 100644 index 4c02f7c4f13..00000000000 --- a/src/current/v1.0/alter-table.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: ALTER TABLE -summary: Use the ALTER TABLE statement to change the schema of a table. -toc: false ---- - -The `ALTER TABLE` [statement](sql-statements.html) applies a schema change to a table. 
- -{{site.data.alerts.callout_info}}To understand how CockroachDB changes schema elements without requiring table locking or other user-visible downtime, see Online Schema Changes in CockroachDB.{{site.data.alerts.end}} - -For information on using `ALTER TABLE`, see the documents for its relevant subcommands. - -Subcommand | Description ------------|------------ -[`ADD COLUMN`](add-column.html) | Add columns to tables. -[`ADD CONSTRAINT`](add-constraint.html) | Add constraints to columns. -[`ALTER COLUMN`](alter-column.html) | Change a column's [Default constraint](default-value.html) or drop the [Not Null constraint](not-null.html). -[`DROP COLUMN`](drop-column.html) | Remove columns from tables. -[`DROP CONSTRAINT`](drop-constraint.html) | Remove constraints from columns. -[`RENAME COLUMN`](rename-column.html) | Change the names of columns. -[`RENAME TABLE`](rename-table.html) | Change the names of tables. -`SPLIT AT` | *(Documentation pending)* Potentially improve performance by identifying ideal locations to split data in the key-value layer. diff --git a/src/current/v1.0/alter-view.md b/src/current/v1.0/alter-view.md deleted file mode 100644 index dcdc2f418d1..00000000000 --- a/src/current/v1.0/alter-view.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: ALTER VIEW -summary: The ALTER VIEW statement changes the name of a view. -toc: true ---- - -The `ALTER VIEW` [statement](sql-statements.html) changes the name of a [view](views.html). - -{{site.data.alerts.callout_info}}It is not currently possible to change the SELECT statement executed by a view. Instead, you must drop the existing view and create a new view. Also, it is not currently possible to rename a view that other views depend on, but this ability may be added in the future (see this issue).{{site.data.alerts.end}} - - -## Required Privileges - -The user must have the `DROP` [privilege](privileges.html) on the view and the `CREATE` privilege on the parent database. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/alter_view.html %} - -## Parameters - -Parameter | Description -----------|------------ -`IF EXISTS` | Rename the view only if a view of `view_name` exists; if one does not exist, do not return an error. -`view_name` | The name of the view to rename. To find view names, use:

`SELECT * FROM information_schema.tables WHERE table_type = 'VIEW';` -`name` | The new [`name`](sql-grammar.html#name) for the view, which must be unique to its database and follow these [identifier rules](keywords-and-identifiers.html#identifiers). - -## Example - -~~~ sql -> SELECT * FROM information_schema.tables WHERE table_type = 'VIEW'; -~~~ - -~~~ -+---------------+-------------------+--------------------+------------+---------+ -| TABLE_CATALOG | TABLE_SCHEMA      | TABLE_NAME         | TABLE_TYPE | VERSION | -+---------------+-------------------+--------------------+------------+---------+ -| def           | bank              | user_accounts      | VIEW       |       2 | -| def           | bank              | user_emails        | VIEW       |       1 | -+---------------+-------------------+--------------------+------------+---------+ -(2 rows) -~~~ - -~~~ sql -> ALTER VIEW bank.user_emails RENAME TO bank.user_email_addresses; -~~~ - -~~~ -RENAME VIEW -~~~ - -~~~ sql -> SELECT * FROM information_schema.tables WHERE table_type = 'VIEW'; -~~~ - -~~~ -+---------------+-------------------+----------------------+------------+---------+ -| TABLE_CATALOG | TABLE_SCHEMA      | TABLE_NAME           | TABLE_TYPE | VERSION | -+---------------+-------------------+----------------------+------------+---------+ -| def           | bank              | user_accounts        | VIEW       |       2 | -| def           | bank              | user_email_addresses | VIEW       |       3 | -+---------------+-------------------+----------------------+------------+---------+ -(2 rows) -~~~ - -## See Also - -- [Views](views.html) -- [`CREATE VIEW`](create-view.html) -- [`SHOW CREATE VIEW`](show-create-view.html) -- [`DROP VIEW`](drop-view.html) diff --git a/src/current/v1.0/as-of-system-time.md b/src/current/v1.0/as-of-system-time.md deleted file mode 100644 index de2885ba65c..00000000000 --- a/src/current/v1.0/as-of-system-time.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: AS OF SYSTEM TIME -summary: The AS OF SYSTEM TIME clause executes a statement as of a specified time. -toc: false ---- - -The `AS OF SYSTEM TIME timestamp` clause is available in some statements to execute them as of the specified time. - -The `timestamp` argument supports the following formats. - -Format | Notes ----|--- -[`INT`](int.html) | Nanoseconds since the Unix epoch. -[`STRING`](string.html) | A [`TIMESTAMP`](timestamp.html) or [`INT`](int.html) of nanoseconds. - -## Examples - -Assuming the following statements are run at `2016-01-01 12:00:00`, they would execute as of `2016-01-01 08:00:00`: - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM t AS OF SYSTEM TIME '2016-01-01 08:00:00' -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM t AS OF SYSTEM TIME 1451635200000000000 -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM t AS OF SYSTEM TIME '1451635200000000000' -~~~ - -## See Also - -- [Select Historical Data](select.html#select-historical-data-time-travel) -- [Time-Travel Queries](https://www.cockroachlabs.com/blog/time-travel-queries-select-witty_subtitle-the_future/) - -## Tech Note - -{{site.data.alerts.callout_info}}Although the following format is supported, it is not intended to be used by most users.{{site.data.alerts.end}} - -HLC timestamps can be specified using a [`DECIMAL`](decimal.html). The integer part is the wall time in nanoseconds. The fractional part is the logical counter, a 10-digit integer. This is the same format as produced by the `cluster_logical_timestamp()` function. 
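As a minimal sketch of that decimal form (reusing the wall time from the examples above; the table `t` and the logical-counter value are illustrative, not part of the original examples):

~~~ sql
> SELECT * FROM t AS OF SYSTEM TIME 1451635200000000000.0000000001;
~~~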
diff --git a/src/current/v1.0/automated-scaling-and-repair.md b/src/current/v1.0/automated-scaling-and-repair.md deleted file mode 100644 index 5f8100e49d2..00000000000 --- a/src/current/v1.0/automated-scaling-and-repair.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Automated Scaling & Repair -summary: CockroachDB transparently manages scale with an upgrade path from a single node to hundreds. -toc: false ---- - -CockroachDB scales horizontally with minimal operator overhead. You can run it on your local computer, a single server, a corporate development cluster, or a private or public cloud. [Adding capacity](start-a-node.html) is as easy as pointing a new node at the running cluster. - -At the key-value level, CockroachDB starts off with a single, empty range. As you put data in, this single range eventually reaches a threshold size (64MB by default). When that happens, the data splits into two ranges, each covering a contiguous segment of the entire key-value space. This process continues indefinitely; as new data flows in, existing ranges continue to split into new ranges, aiming to keep a relatively small and consistent range size. - -When your cluster spans multiple nodes (physical machines, virtual machines, or containers), newly split ranges are automatically rebalanced to nodes with more capacity. CockroachDB communicates opportunities for rebalancing using a peer-to-peer [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) by which nodes exchange network addresses, store capacity, and other information. - -- Add resources to scale horizontally, with zero hassle and no downtime -- Self-organizes, self-heals, and automatically rebalances -- Migrate data seamlessly between clouds - -Automated scaling and repair in CockroachDB diff --git a/src/current/v1.0/back-up-data.md b/src/current/v1.0/back-up-data.md deleted file mode 100644 index 0e8a5bb84b1..00000000000 --- a/src/current/v1.0/back-up-data.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Back Up Data -summary: Learn how to back up and restore a CockroachDB cluster. -toc: false ---- - -CockroachDB offers the following methods to back up your cluster's data: - -- [`cockroach dump`](sql-dump.html), which is a CLI command to dump/export your database's schema and table data. -- [`BACKUP`](backup.html) (*[enterprise license](https://www.cockroachlabs.com/pricing/) only*), which is a SQL statement that backs up your cluster to cloud or network file storage. - -### Details - -We recommend creating daily backups of your data as an operational best practice. - -However, because CockroachDB is designed with high fault tolerance, backups are primarily needed for disaster recovery (i.e., if your cluster loses a majority of its nodes). Isolated issues (such as small-scale node outages) do not require any intervention. - -## Restore - -For information about restoring your backed up data, see [Restoring Data](restore-data.html). - -## See Also - -- [Restore Data](restore-data.html) -- [Use the Built-in SQL Client](use-the-built-in-sql-client.html) -- [Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/backup.md b/src/current/v1.0/backup.md deleted file mode 100644 index 72387e99407..00000000000 --- a/src/current/v1.0/backup.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: BACKUP -summary: Back up your CockroachDB cluster to a cloud storage services such as AWS S3, Google Cloud Storage, or other NFS. 
-toc: true ---- - -{{site.data.alerts.callout_danger}}The BACKUP feature is only available to enterprise license users. For non-enterprise backups, see cockroach dump.{{site.data.alerts.end}} - -CockroachDB's `BACKUP` [statement](sql-statements.html) creates full or incremental backups of your cluster's schemas and data that are consistent as of a given timestamp. These backups can be stored on the platforms you're already using, including AWS S3, Google Cloud Storage, NFS, or HTTP storage. - -Because CockroachDB is designed with high fault tolerance, these backups are designed primarily for disaster recovery (i.e., if your cluster loses a majority of its nodes) through [`RESTORE`](restore.html). Isolated issues (such as small-scale node outages) do not require any intervention. - - -## Functional Details - -### Backup Targets - -You can backup entire tables (which automatically includes their indexes) or [views](views.html). Backing up a database simply backs up all of its tables and views. - -{{site.data.alerts.callout_info}}BACKUP only offers table-level granularity; it does not support backing up subsets of a table.{{site.data.alerts.end}} - -### Object Dependencies - -Dependent objects should be backed up at the same time as the objects they depend on; otherwise, you cannot restore the dependent objects. - -Object | Depends On --------|----------- -Table with [foreign key](foreign-key.html) constraints | The table it `REFERENCES` (however, this dependency can be [removed during the restore](restore.html#skip_missing_foreign_keys)) -[Views](views.html) | The tables used in the view's `SELECT` statement -[Interleaved tables](interleave-in-parent.html) | The parent table in the [interleaved hierarchy](interleave-in-parent.html#interleaved-hierarchy) - -### Users and Privileges - -Every backup you create includes `system.users`, which stores your users and their passwords. To restore your users, you must use [this procedure](restore.html#restoring-users-from-system-users-backup). - -Restored tables inherit privilege grants from the target database; they do not preserve privilege grants from the backed up table because the restoring cluster may have different users. - -Table-level privileges must be [granted to users](grant.html) after the restore is complete. - -### Backup Types - -CockroachDB offers two types of backups: full and incremental. - -#### Full Backups - -Full backups contain an unreplicated copy of your data and can always be used to restore your cluster. These files are roughly the size of your data and require greater resources to produce than incremental backups. - -#### Incremental Backups - -Incremental backups are smaller and faster to produce than full backups because they contain only the data that has changed since a base set of backups you specify (which must include one full backup, and can include many incremental backups). - -You can only create incremental backups within the garbage collection period of the base backup's most recent timestamp. This is because incremental backups are created by finding which data has been created or modified since the most recent timestamp in the base backup––that timestamp data, though, is deleted by the garbage collection process. - -You can configure garbage collection periods on a per-table basis using the `ttlseconds` [replication zone setting](configure-replication-zones.html). - -## Performance - -The `BACKUP` process minimizes its impact to the cluster's performance by distributing work to all nodes. 
Each node backs up only a specific subset of the data it stores (those for which it serves writes; more details about this architectural concept forthcoming), with no two nodes backing up the same data. - -For best performance, we also recommend always starting backups with a specific [timestamp](timestamp.html) at least 10 seconds in the past. For example: - -~~~ sql -> BACKUP...AS OF SYSTEM TIME '2017-06-09 16:13:55.571516+00:00'; -~~~ - -This improves performance by decreasing the likelihood that the `BACKUP` will be [retried because it contends with other statements/transactions](transactions.html#transaction-retries). - -## Automating Backups - -We recommend automating daily backups of your cluster. - -To automate backups, you must have a client send the `BACKUP` statement to the cluster. - -Once the backup is complete, your client will receive a `BACKUP` response. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/backup.html %} - -## Required Privileges - -Only the `root` user can run `BACKUP`. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `table_pattern` | The table or [view](views.html) you want to back up. | -| `name` | The name of the database you want to back up (i.e., create backups of all tables and views in the database).| -| `destination` | The URL where you want to store the backup.

For information about this URL structure, see [Backup File URLs](#backup-file-urls). | -| `AS OF SYSTEM TIME timestamp` | Back up data as it existed as of [`timestamp`](as-of-system-time.html). However, the `timestamp` must be more recent than your cluster's last garbage collection (which defaults to occur every 24 hours, but is [configurable per table](configure-replication-zones.html#replication-zone-format)). | -| `INCREMENTAL FROM full_backup_location` | Create an incremental backup using the full backup stored at the URL `full_backup_location` as its base.

For information about this URL structure, see [Backup File URLs](#backup-file-urls). | -| `incremental_backup_location` | Create an incremental backup that includes all backups listed at the provided URLs.

Lists of incremental backups must be sorted from oldest to newest. The newest incremental backup's timestamp must be within the table's garbage collection period.

For information about this URL structure, see [Backup File URLs](#backup-file-urls).

For more information about garbage collection, see [Configure Replication Zones](configure-replication-zones.html#replication-zone-format). | - -### Backup File URLs - -The path to each backup must be unique. The URL for each backup destination must use the following format: - -{% include {{ page.version.version }}/misc/external-urls.md %} - -## Examples - -Per our guidance in the [Performance](#performance) section, we recommend starting backups from a time at least 10 seconds in the past using [`AS OF SYSTEM TIME`](as-of-system-time.html). - -### Backup a Single Table or View - -~~~ sql -> BACKUP bank.customers TO 'azure://acme-co-backup/table-customer-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -AS OF SYSTEM TIME '2017-06-09 16:13:55.571516+00:00'; -~~~ - -### Backup Multiple Tables - -~~~ sql -> BACKUP bank.customers, bank.accounts TO 'azure://acme-co-backup/tables-accounts-customers-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -AS OF SYSTEM TIME '2017-06-09 16:13:55.571516+00:00'; -~~~ - -### Backup an Entire Database - -~~~ sql -> BACKUP DATABASE bank TO 'azure://acme-co-backup/database-bank-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -AS OF SYSTEM TIME '2017-06-09 16:13:55.571516+00:00'; -~~~ - -### Create Incremental Backups - -Incremental backups must be based on full backups you've already created. - -~~~ sql -> BACKUP DATABASE bank TO 'azure://acme-co-backup/database-bank-2017-03-29-incremental?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -AS OF SYSTEM TIME '2017-06-09 16:13:55.571516+00:00' -INCREMENTAL FROM 'azure://acme-co-backup/database-bank-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -, 'azure://acme-co-backup/database-bank-2017-03-28-incremental?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co'; -~~~ - -## See Also - -- [`RESTORE`](restore.html) -- [Configure Replication Zones](configure-replication-zones.html) diff --git a/src/current/v1.0/begin-transaction.md b/src/current/v1.0/begin-transaction.md deleted file mode 100644 index e4fa80f16a4..00000000000 --- a/src/current/v1.0/begin-transaction.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: BEGIN -summary: Initiate a SQL transaction with the BEGIN statement in CockroachDB. -toc: true ---- - -The `BEGIN` [statement](sql-statements.html) initiates a [transaction](transactions.html), which either successfully executes all of the statements it contains or none at all. - -{{site.data.alerts.callout_danger}}When using transactions, your application should include logic to retry transactions that are aborted to break a dependency cycle between concurrent transactions.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/begin_transaction.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to initiate a transaction. However, privileges are required for each statement within a transaction. - -## Aliases - -In CockroachDB, the following are aliases for the `BEGIN` statement: - -- `BEGIN TRANSACTION` -- `START TRANSACTION` - -The following aliases also exist for [isolation levels](transactions.html#isolation-levels): - -- `REPEATABLE READ` is an alias for `SERIALIZABLE` -- `READ UNCOMMITTED` and `READ COMMITTED` are aliases for `SNAPSHOT` - -For more information on isolation level aliases, see [Comparison to ANSI SQL Isolation Levels](transactions.html#comparison-to-ansi-sql-isolation-levels). 
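For example, the following two statements open equivalent transactions (a sketch; the second form simply combines the `START TRANSACTION` statement alias with the `REPEATABLE READ` isolation alias, which maps to `SERIALIZABLE`):

~~~ sql
> BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
~~~

~~~ sql
> START TRANSACTION ISOLATION LEVEL REPEATABLE READ;
~~~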
- -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `ISOLATION LEVEL` | If you do not want the transaction to run as `SERIALIZABLE` (CockroachDB's default, which provides the highest level of isolation), you can specify `SNAPSHOT`, which can provide better performance in high-contention scenarios.

For more information, see [Transactions: Isolation Levels](transactions.html#isolation-levels).

**Default**: `SERIALIZABLE` | -| `PRIORITY` | If you do not want the transaction to run with `NORMAL` priority, you can set it to `LOW` or `HIGH`.

Transactions with higher priority are less likely to need to be retried.

For more information, see [Transactions: Priorities](transactions.html#transaction-priorities).

**Default**: `NORMAL` | - -## Examples - -### Begin a Transaction - -#### Use Default Settings - -Without modifying the `BEGIN` statement, the transaction uses `SERIALIZABLE` isolation and `NORMAL` priority. - -~~~ sql -> BEGIN; - -> SAVEPOINT cockroach_restart; - -> UPDATE products SET inventory = 0 WHERE sku = '8675309'; - -> INSERT INTO orders (customer, sku, status) VALUES (1001, '8675309', 'new'); - -> RELEASE SAVEPOINT cockroach_restart; - -> COMMIT; -~~~ - -{{site.data.alerts.callout_danger}}This example assumes you're using client-side intervention to handle transaction retries.{{site.data.alerts.end}} - -#### Change Isolation Level & Priority - -You can set a transaction's isolation level to `SNAPSHOT`, as well as its priority to `LOW` or `HIGH`. - -~~~ sql -> BEGIN ISOLATION LEVEL SNAPSHOT, PRIORITY HIGH; - -> SAVEPOINT cockroach_restart; - -> UPDATE products SET inventory = 0 WHERE sku = '8675309'; - -> INSERT INTO orders (customer, sku, status) VALUES (1001, '8675309', 'new'); - -> RELEASE SAVEPOINT cockroach_restart; - -> COMMIT; -~~~ - -You can also set a transaction's isolation level and priority with [`SET TRANSACTION`](set-transaction.html). - -{{site.data.alerts.callout_danger}}This example assumes you're using client-side intervention to handle transaction retries.{{site.data.alerts.end}} - -### Begin a Transaction with Automatic Retries - -CockroachDB will [automatically retry](transactions.html#transaction-retries) all transactions that contain both `BEGIN` and `COMMIT` in the same batch. Batching is controlled by your driver or client's behavior, which means that CockroachDB receives all of the statements as a single unit, instead of a number of requests. - -From the perspective of CockroachDB, a transaction sent as a batch looks like this: - -~~~ sql -> BEGIN; DELETE FROM customers WHERE id = 1; DELETE FROM orders WHERE customer = 1; COMMIT; -~~~ - -However, in your application's code, batched transactions are often just multiple statements sent at once. For example, in Go, this transaction would be sent as a single batch (and automatically retried): - -~~~ go -// Send the entire transaction as one batch so CockroachDB can retry it automatically. -db.Exec( - `BEGIN; - - DELETE FROM customers WHERE id = 1; - - DELETE FROM orders WHERE customer = 1; - - COMMIT;`, -) -~~~ - -Issuing statements this way signals to CockroachDB that you do not need to change any of the statements' values if the transaction doesn't immediately succeed, so it can continually retry the transaction until it's accepted. - -## See Also - -- [Transactions](transactions.html) -- [`COMMIT`](commit-transaction.html) -- [`SAVEPOINT`](savepoint.html) -- [`RELEASE SAVEPOINT`](release-savepoint.html) -- [`ROLLBACK`](rollback-transaction.html) diff --git a/src/current/v1.0/bool.md b/src/current/v1.0/bool.md deleted file mode 100644 index b7bdd457701..00000000000 --- a/src/current/v1.0/bool.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: BOOL -summary: The BOOL data type stores Boolean values of false or true. -toc: true ---- - -The `BOOL` [data type](data-types.html) stores a Boolean value of `false` or `true`. - - -## Aliases - -In CockroachDB, `BOOLEAN` is an alias for `BOOL`. - -## Syntax - -There are two predefined -[named constants](sql-constants.html#named-constants) for `BOOL`: -`TRUE` and `FALSE` (the names are case-insensitive). - -Alternatively, a Boolean value can be obtained by coercing a numeric -value: zero is coerced to `FALSE`, and any non-zero value to `TRUE`. 
- -- `CAST(0 AS BOOL)` (false) -- `CAST(123 AS BOOL)` (true) - -## Size - -A `BOOL` value is 1 byte in width, but the total storage size is likely to be larger due to CockroachDB metadata. - -## Examples - -~~~ sql -> CREATE TABLE bool (a INT PRIMARY KEY, b BOOL, c BOOLEAN); - -> SHOW COLUMNS FROM bool; -~~~ -~~~ -+-------+------+-------+---------+ -| Field | Type | Null | Default | -+-------+------+-------+---------+ -| a | INT | false | NULL | -| b | BOOL | true | NULL | -| c | BOOL | true | NULL | -+-------+------+-------+---------+ -~~~ -~~~ sql -> INSERT INTO bool VALUES (12345, true, CAST(0 AS BOOL)); - -> SELECT * FROM bool; -~~~ -~~~ -+-------+------+-------+ -| a | b | c | -+-------+------+-------+ -| 12345 | true | false | -+-------+------+-------+ -~~~ - -## Supported Casting & Conversion - -`BOOL` values can be [cast](data-types.html#data-type-conversions-casts) to any of the following data types: - -Type | Details ------|-------- -`INT` | Converts `true` to `1`, `false` to `0` -`DECIMAL` | Converts `true` to `1`, `false` to `0` -`FLOAT` | Converts `true` to `1`, `false` to `0` -`STRING` | –– - -{{site.data.alerts.callout_info}}Because the SERIAL data type represents values automatically generated by CockroachDB to uniquely identify rows, you cannot meaningfully cast other data types as SERIAL values.{{site.data.alerts.end}} - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/build-a-c++-app-with-cockroachdb.md b/src/current/v1.0/build-a-c++-app-with-cockroachdb.md deleted file mode 100644 index c96e642e51e..00000000000 --- a/src/current/v1.0/build-a-c++-app-with-cockroachdb.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Build a C++ App with CockroachDB -summary: Learn how to use CockroachDB from a simple C++ application with a low-level client driver. -toc: true -twitter: false ---- - -This tutorial shows you how build a simple C++ application with CockroachDB using a PostgreSQL-compatible driver. We've tested and can recommend the [C++ libpqxx driver](https://github.com/jtv/libpqxx), so that driver is featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the libpqxx driver - -Install the C++ libpqxx driver as described in the [official documentation](https://github.com/jtv/libpqxx). - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the C++ code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). - -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, creating a table, inserting rows, and reading and printing the rows. - -Download the basic-sample.cpp file, or create the file yourself and copy the code into it. - -{% include copy-clipboard.html %} -~~~ cpp -{% include {{ page.version.version }}/app/basic-sample.cpp %} -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time execute a batch of statements as an atomic transaction to transfer funds from one account to another, where all included statements are either committed or aborted. - -{{site.data.alerts.callout_info}}With the default SERIALIZABLE isolation level, CockroachDB may require the client to retry a transaction in case of read/write contention. 
CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. You can copy and paste the retry function from here into your code.{{site.data.alerts.end}} - -Download the txn-sample.cpp file, or create the file yourself and copy the code into it. - -{% include copy-clipboard.html %} -~~~ cpp -{% include {{ page.version.version }}/app/txn-sample.cpp %} -~~~ - -After running the code, use the [built-in SQL client](use-the-built-in-sql-client.html) to verify that funds were transferred from one account to another: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [C++ libpqxx driver](https://github.com/jtv/libpqxx). - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-clojure-app-with-cockroachdb.md b/src/current/v1.0/build-a-clojure-app-with-cockroachdb.md deleted file mode 100644 index eb9c421eac6..00000000000 --- a/src/current/v1.0/build-a-clojure-app-with-cockroachdb.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Build a Clojure App with CockroachDB -summary: Learn how to use CockroachDB from a simple Clojure application with a low-level client driver. -toc: true -twitter: false ---- - -This tutorial shows you how build a simple Clojure application with CockroachDB using [leiningen](https://leiningen.org/) and a PostgreSQL-compatible driver. We've tested and can recommend the [Clojure java.jdbc driver](https://clojure-doc.org/articles/ecosystem/java_jdbc/home/) in conjunction with the [PostgreSQL JDBC driver](https://jdbc.postgresql.org/), so that driver is featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install `leiningen` - -Install the Clojure `lein` utility as described in its [official documentation](https://leiningen.org/). - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Create a table in the new database - -As the `maxroach` user, use the [built-in SQL client](use-the-built-in-sql-client.html) to create an `accounts` table in the new database. - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure \ ---database=bank \ ---user=maxroach \ --e 'CREATE TABLE accounts (id INT PRIMARY KEY, balance INT)' -~~~ - -## Step 6. Run the Clojure code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). - -### Create a basic Clojure/JDBC project - -1. Create a new directory `myapp`. -2. Create a file `myapp/project.clj` and populate it with the following code, or download it directly. Be sure to place the file in the subdirectory `src/test` in your project. - - {% include copy-clipboard.html %} - ~~~ clojure - {% include {{ page.version.version }}/app/project.clj %} - ~~~ - -3. Create a file `myapp/src/test/util.clj` and populate it with the code from this file. 
Be sure to place the file in the subdirectory `src/test` in your project. - -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, inserting rows and reading and printing the rows. - -Create a file `myapp/src/test/test.clj` and copy the code below to it, or download it directly. Be sure to rename this file to `test.clj` in the subdirectory `src/test` in your project. - -{% include copy-clipboard.html %} -~~~ clojure -{% include {{ page.version.version }}/app/basic-sample.clj %} -~~~ - -Run with: - -{% include copy-clipboard.html %} -~~~ shell -lein run -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time execute a batch of statements as an atomic transaction to transfer funds from one account to another, where all included statements are either committed or aborted. - -Copy the code below to `myapp/src/test/test.clj` or -download it directly. Again, preserve the file name `test.clj`. - -{{site.data.alerts.callout_info}}With the default SERIALIZABLE isolation level, CockroachDB may require the client to retry a transaction in case of read/write contention. CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. You can copy and paste the retry function from here into your code.{{site.data.alerts.end}} - -{% include copy-clipboard.html %} -~~~ clojure -{% include {{ page.version.version }}/app/txn-sample.clj %} -~~~ - -Run with: - -{% include copy-clipboard.html %} -~~~ shell -lein run -~~~ - -After running the code, use the [built-in SQL client](use-the-built-in-sql-client.html) to verify that funds were transferred from one account to another: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [Clojure java.jdbc driver](https://clojure-doc.org/articles/ecosystem/java_jdbc/home/). - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-go-app-with-cockroachdb-gorm.md b/src/current/v1.0/build-a-go-app-with-cockroachdb-gorm.md deleted file mode 100644 index f3618968772..00000000000 --- a/src/current/v1.0/build-a-go-app-with-cockroachdb-gorm.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Build a Go App with CockroachDB -summary: Learn how to use CockroachDB from a simple Go application with the GORM ORM. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Go application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Go pq driver](https://godoc.org/github.com/lib/pq) and the [GORM ORM](http://gorm.io), so those are featured here. - -{{site.data.alerts.callout_success}}For a more realistic use of GORM with CockroachDB, see our examples-orms repository.{{site.data.alerts.end}} - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. 
Install the GORM ORM - -To install [GORM](http://gorm.io), run the following command: - -{% include copy-clipboard.html %} -~~~ shell -$ go get -u github.com/jinzhu/gorm -~~~ - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Go code - -The following code uses the [GORM](http://gorm.io) ORM to map Go-specific objects to SQL operations. Specifically, `db.AutoMigrate(&Account{})` creates an `accounts` table based on the Account model, `db.Create(&Account{})` inserts rows into the table, and `db.Find(&accounts)` selects from the table so that balances can be printed. - -Copy the code or -download it directly. - -{% include copy-clipboard.html %} -~~~ go -{% include {{ page.version.version }}/app/gorm-basic-sample.go %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ go run gorm-basic-sample.go -~~~ - -The output should be: - -~~~ -Initial balances: -1 1000 -2 250 -~~~ - -To verify that the table and rows were created successfully, you can again use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SHOW TABLES' --database=bank -~~~ - -~~~ -+----------+ -| Table | -+----------+ -| accounts | -+----------+ -(1 row) -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000 | -| 2 | 250 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [GORM ORM](http://gorm.io), or check out a more realistic implementation of GORM with CockroachDB in our [`examples-orms`](https://github.com/cockroachdb/examples-orms) repository. - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-go-app-with-cockroachdb.md b/src/current/v1.0/build-a-go-app-with-cockroachdb.md deleted file mode 100644 index 3bd347b848b..00000000000 --- a/src/current/v1.0/build-a-go-app-with-cockroachdb.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Build a Go App with CockroachDB -summary: Learn how to use CockroachDB from a simple Go application with the Go pq driver. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Go application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Go pq driver](https://godoc.org/github.com/lib/pq) and the [GORM ORM](http://gorm.io), so those are featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the Go pq driver - -To install the [Go pq driver](https://godoc.org/github.com/lib/pq), run the following command: - -{% include copy-clipboard.html %} -~~~ shell -$ go get -u github.com/lib/pq -~~~ - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Go code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). 
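Conceptually, the basic statements sample below boils down to SQL along these lines (a sketch only, inferred from the sample's printed output; the Go file issues the equivalent statements through the pq driver):

~~~ sql
> CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT);
> INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250);
> SELECT id, balance FROM accounts;
~~~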
- -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, creating a table, inserting rows, and reading and printing the rows. - -Download the basic-sample.go file, or create the file yourself and copy the code into it. - -{% include copy-clipboard.html %} -~~~ go -{% include {{ page.version.version }}/app/basic-sample.go %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ go run basic-sample.go -~~~ - -The output should be: - -~~~ -Initial balances: -1 1000 -2 250 -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time will execute a batch of statements as an atomic transaction to transfer funds from one account to another, where all included statements are either committed or aborted. - -Download the txn-sample.go file, or create the file yourself and copy the code into it. - -{% include copy-clipboard.html %} -~~~ go -{% include {{ page.version.version }}/app/txn-sample.go %} -~~~ - -With the default `SERIALIZABLE` isolation level, CockroachDB may require the [client to retry a transaction](transactions.html#transaction-retries) in case of read/write contention. CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. For Go, the CockroachDB retry function is in the `crdb` package of the CockroachDB Go client. Clone the library into your `$GOPATH` as follows: - -{% include copy-clipboard.html %} -~~~ shell -$ mkdir -p $GOPATH/src/github.com/cockroachdb -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cd $GOPATH/src/github.com/cockroachdb -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ git clone git@github.com:cockroachdb/cockroach-go.git -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ go run txn-sample.go -~~~ - -The output should just be: - -~~~ shell -Success -~~~ - -However, if you want to verify that funds were transferred from one account to another, use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [Go pq driver](https://godoc.org/github.com/lib/pq). - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-java-app-with-cockroachdb-hibernate.md b/src/current/v1.0/build-a-java-app-with-cockroachdb-hibernate.md deleted file mode 100644 index 4d8c098450f..00000000000 --- a/src/current/v1.0/build-a-java-app-with-cockroachdb-hibernate.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Build a Java App with CockroachDB -summary: Learn how to use CockroachDB from a simple Java application with the Hibernate ORM. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Java application with CockroachDB using a PostgreSQL-compatible driver or ORM. 
We've tested and can recommend the [Java jdbc driver](https://jdbc.postgresql.org/) and the [Hibernate ORM](http://hibernate.org/), so those are featured here. - -{{site.data.alerts.callout_success}}For a more realistic use of Hibernate with CockroachDB, see our examples-orms repository.{{site.data.alerts.end}} - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the Gradle build tool - -This tutorial uses the [Gradle build tool](https://gradle.org/) to get all dependencies for your application, including the Hibernate ORM. To install Gradle, run the following command: - -{% include copy-clipboard.html %} -~~~ shell -# On Mac: -$ brew install gradle -~~~ - -{% include copy-clipboard.html %} -~~~ shell -# On Ubuntu Linux: -$ apt-get install gradle -~~~ - -For other ways to install Gradle, see the [official documentation](https://gradle.org/install). - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Java code - -[Download and extract this tarball](https://github.com/cockroachdb/docs/raw/master/_includes/{{ page.version.version }}/app/hibernate-basic-sample/hibernate-basic-sample.tgz), which includes three files that work together: - -File | Description ------|------------ -[`hibernate.cfg.xml`](https://raw.githubusercontent.com/cockroachdb/docs/master/_includes/{{ page.version.version }}/app/hibernate-basic-sample/hibernate.cfg.xml) | This file specifies how to connect to the database and that the database schema will be deleted and recreated each time the app is run. It must be in the `src/main/resources` directory. -[`Sample.java`](https://raw.githubusercontent.com/cockroachdb/docs/master/_includes/{{ page.version.version }}/app/hibernate-basic-sample/Sample.java) | This file uses the Hibernate ORM to map Java-specific objects to SQL operations. It must be in the `src/main/java/com/cockroachlabs/` directory. -[`build.gradle`](https://raw.githubusercontent.com/cockroachdb/docs/master/_includes/{{ page.version.version }}/app/hibernate-basic-sample/build.gradle) | This is the file you run to execute your app. - -For more insight into this sample application, review the `Sample.java` file, which uses the [Hibernate ORM](http://hibernate.org/orm/) to map Java-specific objects to SQL operations. Specifically, an `accounts` table gets created based on the `Account` class, `session.save(new Account())` inserts rows into the table, and the `CriteriaQuery query` object defines the SQL query for selecting from the table so that balances can be printed. 
- -{% include copy-clipboard.html %} -~~~ java -{% include {{ page.version.version }}/app/hibernate-basic-sample/Sample.java %} -~~~ - -Then in the `hibernate-basic-sample` directory, run the gradle file to fetch the dependencies in `Sample.java` (including Hibernate) and run the application: - -{% include copy-clipboard.html %} -~~~ shell -$ gradle run -~~~ - -Toward the end of the output, you should see: - -~~~ shell -1 1000 -2 250 -~~~ - -To verify that the table and rows were created successfully, you can again use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SHOW TABLES' --database=bank -~~~ - -~~~ -+----------+ -| Table | -+----------+ -| accounts | -+----------+ -(1 row) -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000 | -| 2 | 250 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [Hibernate ORM](http://hibernate.org/orm/), or check out a more realistic implementation of Hibernate with CockroachDB in our [`examples-orms`](https://github.com/cockroachdb/examples-orms) repository. - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-java-app-with-cockroachdb.md b/src/current/v1.0/build-a-java-app-with-cockroachdb.md deleted file mode 100644 index dd3b318b5c9..00000000000 --- a/src/current/v1.0/build-a-java-app-with-cockroachdb.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Build a Java App with CockroachDB -summary: Learn how to use CockroachDB from a simple Java application with the jdbc driver. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Java application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Java jdbc driver](https://jdbc.postgresql.org/) and the [Hibernate ORM](http://hibernate.org/), so those are featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the Java jdbc driver - -Download and set up the Java jdbc driver as described in the [official documentation](https://jdbc.postgresql.org/documentation/setup/). - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Java code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). - -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, creating a table, inserting rows, and reading and printing the rows. - -Download the BasicSample.java file, or create the file yourself and copy the code into it. 
- -{% include copy-clipboard.html %} -~~~ java -{% include {{ page.version.version }}/app/BasicSample.java %} -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time execute a batch of statements as an atomic transaction to transfer funds from one account to another, where all included statements are either committed or aborted. - -Download the TxnSample.java file, or create the file yourself and copy the code into it. - -{{site.data.alerts.callout_info}}With the default SERIALIZABLE isolation level, CockroachDB may require the client to retry a transaction in case of read/write contention. CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. You can copy and paste the retry function from here into your code.{{site.data.alerts.end}} - -{% include copy-clipboard.html %} -~~~ java -{% include {{ page.version.version }}/app/TxnSample.java %} -~~~ - -After running the code, use the [built-in SQL client](use-the-built-in-sql-client.html) to verify that funds were transferred from one account to another: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [Java jdbc driver](https://jdbc.postgresql.org/). - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-nodejs-app-with-cockroachdb-sequelize.md b/src/current/v1.0/build-a-nodejs-app-with-cockroachdb-sequelize.md deleted file mode 100644 index a3ada316f90..00000000000 --- a/src/current/v1.0/build-a-nodejs-app-with-cockroachdb-sequelize.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Build a Node.js App with CockroachDB -summary: Learn how to use CockroachDB from a simple Node.js application with the Sequelize ORM. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Node.js application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Node.js pg driver](https://www.npmjs.com/package/pg) and the [Sequelize ORM](https://sequelize.org/), so those are featured here. - -{{site.data.alerts.callout_success}}For a more realistic use of Sequelize with CockroachDB, see our examples-orms repository.{{site.data.alerts.end}} - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the Sequelize ORM - -To install Sequelize, as well as a [CockroachDB Node.js package](https://github.com/cockroachdb/sequelize-cockroachdb) that accounts for some minor differences between CockroachDB and PostgreSQL, run the following command: - -{% include copy-clipboard.html %} -~~~ shell -$ npm install sequelize sequelize-cockroachdb -~~~ - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Node.js code - -The following code uses the [Sequelize](https://sequelize.org/) ORM to map Node.js-specific objects to SQL operations. 
Specifically, `Account.sync({force: true})` creates an `accounts` table based on the Account model (or drops and recreates the table if it already exists), `Account.bulkCreate([...])` inserts rows into the table, and `Account.findAll()` selects from the table so that balances can be printed. - -Copy the code or -download it directly. - -{% include copy-clipboard.html %} -~~~ js -{% include {{ page.version.version }}/app/sequelize-basic-sample.js %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ node sequelize-basic-sample.js -~~~ - -The output should be: - -~~~ shell -1 1000 -2 250 -~~~ - -To verify that the table and rows were created successfully, you can again use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SHOW TABLES' --database=bank -~~~ - -~~~ -+----------+ -| Table | -+----------+ -| accounts | -+----------+ -(1 row) -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000 | -| 2 | 250 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [Sequelize ORM](https://sequelize.org/), or check out a more realistic implementation of Sequelize with CockroachDB in our [`examples-orms`](https://github.com/cockroachdb/examples-orms) repository. - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-nodejs-app-with-cockroachdb.md b/src/current/v1.0/build-a-nodejs-app-with-cockroachdb.md deleted file mode 100644 index 8619cc715be..00000000000 --- a/src/current/v1.0/build-a-nodejs-app-with-cockroachdb.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Build a Node.js App with CockroachDB -summary: Learn how to use CockroachDB from a simple Node.js application with the Node.js pg driver. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Node.js application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Node.js pg driver](https://www.npmjs.com/package/pg) and the [Sequelize ORM](http://docs.sequelizejs.com/en/v3/), so those are featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install Node.js packages - -To let your application communicate with CockroachDB, install the [Node.js pg driver](https://www.npmjs.com/package/pg): - -{% include copy-clipboard.html %} -~~~ shell -$ npm install pg -~~~ - -The example app on this page also requires [`async`](https://www.npmjs.com/package/async): - -{% include copy-clipboard.html %} -~~~ shell -$ npm install async -~~~ - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Node.js code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). 
- -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, creating a table, inserting rows, and reading and printing the rows. - -Download the basic-sample.js file, or create the file yourself and copy the code into it. - -{% include copy-clipboard.html %} -~~~ js -{% include {{ page.version.version }}/app/basic-sample.js %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ node basic-sample.js -~~~ - -The output should be: - -~~~ -Initial balances: -{ id: '1', balance: '1000' } -{ id: '2', balance: '250' } -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time execute a batch of statements as an atomic transaction to transfer funds from one account to another and then read the updated values, where all included statements are either committed or aborted. - -Download the txn-sample.js file, or create the file yourself and copy the code into it. - -{{site.data.alerts.callout_info}}With the default SERIALIZABLE isolation level, CockroachDB may require the client to retry a transaction in case of read/write contention. CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. You can copy and paste the retry function from here into your code.{{site.data.alerts.end}} - -{% include copy-clipboard.html %} -~~~ js -{% include {{ page.version.version }}/app/txn-sample.js %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ node txn-sample.js -~~~ - -The output should be: - -~~~ -Balances after transfer: -{ id: '1', balance: '900' } -{ id: '2', balance: '350' } -~~~ - -However, if you want to verify that funds were transferred from one account to another, use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [Node.js pg driver](https://www.npmjs.com/package/pg). - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-php-app-with-cockroachdb.md b/src/current/v1.0/build-a-php-app-with-cockroachdb.md deleted file mode 100644 index e3d6002c5c3..00000000000 --- a/src/current/v1.0/build-a-php-app-with-cockroachdb.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Build a PHP App with CockroachDB -summary: Learn how to use CockroachDB from a simple PHP application with a low-level client driver. -toc: true -twitter: false ---- - -This tutorial shows you how build a simple PHP application with CockroachDB using a PostgreSQL-compatible driver. We've tested and can recommend the [php-pgsql driver](https://www.php.net/manual/en/book.pgsql.php), so that driver is featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. 
Install the php-pgsql driver - -Install the php-pgsql driver as described in the [official documentation](https://www.php.net/manual/en/book.pgsql.php). - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Create a table in the new database - -As the `maxroach` user, use the [built-in SQL client](use-the-built-in-sql-client.html) to create an `accounts` table in the new database. - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure \ ---database=bank \ ---user=maxroach \ --e 'CREATE TABLE accounts (id INT PRIMARY KEY, balance INT)' -~~~ - -## Step 6. Run the PHP code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). - -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, inserting rows and reading and printing the rows. - -Download the basic-sample.php file, or create the file yourself and copy the code into it. - -{% include copy-clipboard.html %} -~~~ php -{% include {{ page.version.version }}/app/basic-sample.php %} -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time execute a batch of statements as an atomic transaction to transfer funds from one account to another, where all included statements are either committed or aborted. - -Download the txn-sample.php file, or create the file yourself and copy the code into it. - -{{site.data.alerts.callout_info}}With the default SERIALIZABLE isolation level, CockroachDB may require the client to retry a transaction in case of read/write contention. CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. You can copy and paste the retry function from here into your code.{{site.data.alerts.end}} - -{% include copy-clipboard.html %} -~~~ php -{% include {{ page.version.version }}/app/txn-sample.php %} -~~~ - -After running the code, use the [built-in SQL client](use-the-built-in-sql-client.html) to verify that funds were transferred from one account to another: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [php-pgsql driver](https://www.php.net/manual/en/book.pgsql.php). - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-python-app-with-cockroachdb-sqlalchemy.md b/src/current/v1.0/build-a-python-app-with-cockroachdb-sqlalchemy.md deleted file mode 100644 index f4173d44d3f..00000000000 --- a/src/current/v1.0/build-a-python-app-with-cockroachdb-sqlalchemy.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Build a Python App with CockroachDB -summary: Learn how to use CockroachDB from a simple Python application with the SQLAlchemy ORM. 
-toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Python application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Python psycopg2 driver](http://initd.org/psycopg/docs/) and the [SQLAlchemy ORM](https://docs.sqlalchemy.org/en/latest/), so those are featured here. - -{{site.data.alerts.callout_success}}For a more realistic use of SQLAlchemy with CockroachDB, see our examples-orms repository.{{site.data.alerts.end}} - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the SQLAlchemy ORM - -To install SQLAlchemy, as well as a [CockroachDB Python package](https://github.com/cockroachdb/sqlalchemy-cockroachdb) that accounts for some minor differences between CockroachDB and PostgreSQL, run the following command: - -{% include copy-clipboard.html %} -~~~ shell -$ pip install sqlalchemy sqlalchemy-cockroachdb psycopg2 -~~~ - -{{site.data.alerts.callout_success}} -You can substitute psycopg2 for other alternatives that include the psycopg python package. -{{site.data.alerts.end}} - -For other ways to install SQLAlchemy, see the [official documentation](http://docs.sqlalchemy.org/en/latest/intro.html#installation-guide). - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Python code - -The following code uses the [SQLAlchemy ORM](https://docs.sqlalchemy.org/en/latest/) to map Python-specific objects to SQL operations. Specifically, `Base.metadata.create_all(engine)` creates an `accounts` table based on the Account class, `session.add_all([Account(),... -])` inserts rows into the table, and `session.query(Account)` selects from the table so that balances can be printed. - -{{site.data.alerts.callout_info}} -The sqlalchemy-cockroachdb python package installed earlier is triggered by the cockroachdb:// prefix in the engine URL. Using postgres:// to connect to your cluster will not work. -{{site.data.alerts.end}} - -Copy the code or -download it directly. - -{% include copy-clipboard.html %} -~~~ python -{% include {{ page.version.version }}/app/sqlalchemy-basic-sample.py %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ python sqlalchemy-basic-sample.py -~~~ - -The output should be: - -~~~ shell -1 1000 -2 250 -~~~ - -To verify that the table and rows were created successfully, you can again use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SHOW TABLES' --database=bank -~~~ - -~~~ -+----------+ -| Table | -+----------+ -| accounts | -+----------+ -(1 row) -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000 | -| 2 | 250 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [SQLAlchemy ORM](https://docs.sqlalchemy.org/en/latest/), or check out a more realistic implementation of SQLAlchemy with CockroachDB in our [`examples-orms`](https://github.com/cockroachdb/examples-orms) repository. 
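
If you only want to confirm the engine URL wiring described above, a minimal sketch like the following is enough. It is not the tutorial's `sqlalchemy-basic-sample.py`: it assumes the local insecure cluster, `bank` database, and `maxroach` user created in the steps above, an empty `accounts` table, and a SQLAlchemy 1.x-era API.

~~~ python
# A minimal sketch, not the tutorial's sqlalchemy-basic-sample.py. Assumes a
# local insecure node, the bank database and maxroach user from the steps
# above, an empty accounts table, and a SQLAlchemy 1.x-era API.
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Account(Base):
    __tablename__ = 'accounts'
    id = Column(Integer, primary_key=True)
    balance = Column(Integer)

# The cockroachdb:// prefix (not postgres://) activates the
# sqlalchemy-cockroachdb dialect, as noted earlier on this page.
engine = create_engine('cockroachdb://maxroach@localhost:26257/bank')
Base.metadata.create_all(engine)  # creates the accounts table if it is missing

session = sessionmaker(bind=engine)()
session.add_all([Account(id=1, balance=1000), Account(id=2, balance=250)])
session.commit()

for account in session.query(Account):
    print(account.id, account.balance)
~~~

The only CockroachDB-specific detail is the engine URL; everything else is ordinary SQLAlchemy usage.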
- -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-python-app-with-cockroachdb.md b/src/current/v1.0/build-a-python-app-with-cockroachdb.md deleted file mode 100644 index 13a6ba99035..00000000000 --- a/src/current/v1.0/build-a-python-app-with-cockroachdb.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Build a Python App with CockroachDB -summary: Learn how to use CockroachDB from a simple Python application with the psycopg2 driver. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Python application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Python psycopg2 driver](http://initd.org/psycopg/docs/) and the [SQLAlchemy ORM](https://docs.sqlalchemy.org/en/latest/), so those are featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the psycopg2 driver - -To install the Python psycopg2 driver, run the following command: - -{% include copy-clipboard.html %} -~~~ shell -$ pip install psycopg2 -~~~ - -For other ways to install psycopg2, see the [official documentation](http://initd.org/psycopg/docs/install.html). - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Python code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). - -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, creating a table, inserting rows, and reading and printing the rows. - -Download the basic-sample.py file, or create the file yourself and copy the code into it. - -{% include copy-clipboard.html %} -~~~ python -{% include {{ page.version.version }}/app/basic-sample.py %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ python basic-sample.py -~~~ - -The output should be: - -~~~ -Initial balances: -['1', '1000'] -['2', '250'] -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time execute a batch of statements as an atomic transaction to transfer funds from one account to another, where all included statements are either committed or aborted. - -Download the txn-sample.py file, or create the file yourself and copy the code into it. - -{{site.data.alerts.callout_info}}With the default SERIALIZABLE isolation level, CockroachDB may require the client to retry a transaction in case of read/write contention. CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. 
You can copy and paste the retry function from here into your code.{{site.data.alerts.end}} - -{% include copy-clipboard.html %} -~~~ python -{% include {{ page.version.version }}/app/txn-sample.py %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ python txn-sample.py -~~~ - -The output should be: - -~~~ shell -Balances after transfer: -['1', '900'] -['2', '350'] -~~~ - -However, if you want to verify that funds were transferred from one account to another, use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [Python psycopg2 driver](http://initd.org/psycopg/docs/). - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-ruby-app-with-cockroachdb-activerecord.md b/src/current/v1.0/build-a-ruby-app-with-cockroachdb-activerecord.md deleted file mode 100644 index d7dd84f2c27..00000000000 --- a/src/current/v1.0/build-a-ruby-app-with-cockroachdb-activerecord.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Build a Ruby App with CockroachDB -summary: Learn how to use CockroachDB from a simple Ruby application with the ActiveRecord ORM. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Ruby application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Ruby pg driver](https://rubygems.org/gems/pg) and the [ActiveRecord ORM](http://guides.rubyonrails.org/active_record_basics.html), so those are featured here. - -{{site.data.alerts.callout_success}}For a more realistic use of ActiveRecord with CockroachDB, see our examples-orms repository.{{site.data.alerts.end}} - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the ActiveRecord ORM - -To install ActiveRecord as well as the [pg driver](https://rubygems.org/gems/pg) and a [CockroachDB Ruby package](https://github.com/cockroachdb/activerecord-cockroachdb-adapter) that accounts for some minor differences between CockroachDB and PostgreSQL, run the following command: - -{% include copy-clipboard.html %} -~~~ shell -$ gem install activerecord pg activerecord-cockroachdb-adapter -~~~ - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Ruby code - -The following code uses the [ActiveRecord](http://guides.rubyonrails.org/active_record_basics.html) ORM to map Ruby-specific objects to SQL operations. Specifically, `Schema.new.change()` creates an `accounts` table based on the Account model (or drops and recreates the table if it already exists), `Account.create()` inserts rows into the table, and `Account.all` selects from the table so that balances can be printed. - -Copy the code or -download it directly. 
- -{% include copy-clipboard.html %} -~~~ ruby -{% include {{ page.version.version }}/app/activerecord-basic-sample.rb %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ ruby activerecord-basic-sample.rb -~~~ - -The output should be: - -~~~ shell --- create_table(:accounts, {:force=>true}) - -> 0.0361s -1 1000 -2 250 -~~~ - -To verify that the table and rows were created successfully, you can again use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SHOW TABLES' --database=bank -~~~ - -~~~ -+----------+ -| Table | -+----------+ -| accounts | -+----------+ -(1 row) -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000 | -| 2 | 250 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [ActiveRecord ORM](http://guides.rubyonrails.org/active_record_basics.html), or check out a more realistic implementation of ActiveRecord with CockroachDB in our [`examples-orms`](https://github.com/cockroachdb/examples-orms) repository. - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-ruby-app-with-cockroachdb.md b/src/current/v1.0/build-a-ruby-app-with-cockroachdb.md deleted file mode 100644 index fb15aad0f76..00000000000 --- a/src/current/v1.0/build-a-ruby-app-with-cockroachdb.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Build a Ruby App with CockroachDB -summary: Learn how to use CockroachDB from a simple Ruby application with the pg client driver. -toc: true -twitter: false ---- - - - -This tutorial shows you how build a simple Ruby application with CockroachDB using a PostgreSQL-compatible driver or ORM. We've tested and can recommend the [Ruby pg driver](https://rubygems.org/gems/pg) and the [ActiveRecord ORM](http://guides.rubyonrails.org/active_record_basics.html), so those are featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the Ruby pg driver - -To install the [Ruby pg driver](https://rubygems.org/gems/pg), run the following command: - -{% include copy-clipboard.html %} -~~~ shell -$ gem install pg -~~~ - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Run the Ruby code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). - -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, creating a table, inserting rows, and reading and printing the rows. - -Download the basic-sample.rb file, or create the file yourself and copy the code into it. 
- -{% include copy-clipboard.html %} -~~~ ruby -{% include {{ page.version.version }}/app/basic-sample.rb %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ ruby basic-sample.rb -~~~ - -The output should be: - -~~~ -Initial balances: -{"id"=>"1", "balance"=>"1000"} -{"id"=>"2", "balance"=>"250"} -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time execute a batch of statements as an atomic transaction to transfer funds from one account to another, where all included statements are either committed or aborted. - -Download the txn-sample.rb file, or create the file yourself and copy the code into it. - -{{site.data.alerts.callout_info}}With the default SERIALIZABLE isolation level, CockroachDB may require the client to retry a transaction in case of read/write contention. CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. You can copy and paste the retry function from here into your code.{{site.data.alerts.end}} - -{% include copy-clipboard.html %} -~~~ ruby -{% include {{ page.version.version }}/app/txn-sample.rb %} -~~~ - -Then run the code: - -{% include copy-clipboard.html %} -~~~ shell -$ ruby txn-sample.rb -~~~ - -To verify that funds were transferred from one account to another, use the [built-in SQL client](use-the-built-in-sql-client.html): - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the [Ruby pg driver](https://rubygems.org/gems/pg). - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-a-rust-app-with-cockroachdb.md b/src/current/v1.0/build-a-rust-app-with-cockroachdb.md deleted file mode 100644 index 6f7c009a7b7..00000000000 --- a/src/current/v1.0/build-a-rust-app-with-cockroachdb.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Build a Rust App with CockroachDB -summary: Learn how to use CockroachDB from a simple Rust application with a low-level client driver. -toc: true -twitter: false ---- - -This tutorial shows you how build a simple Rust application with CockroachDB using a PostgreSQL-compatible driver. We've tested and can recommend the Rust Postgres driver, so that driver is featured here. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Install the Rust Postgres driver - -Install the Rust Postgres driver as described in the official documentation. - -{% include {{ page.version.version }}/app/common-steps.md %} - -## Step 5. Create a table in the new database - -As the `maxroach` user, use the [built-in SQL client](use-the-built-in-sql-client.html) to create an `accounts` table in the new database. - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure \ ---database=bank \ ---user=maxroach \ --e 'CREATE TABLE accounts (id INT PRIMARY KEY, balance INT)' -~~~ - -## Step 6. 
Run the Rust code - -Now that you have a database and a user, you'll run code to create a table and insert some rows, and then you'll run code to read and update values as an atomic [transaction](transactions.html). - -### Basic Statements - -First, use the following code to connect as the `maxroach` user and execute some basic SQL statements, inserting rows and reading and printing the rows. - -Download the basic-sample.rs file, or create the file yourself and copy the code into it. - -{% include copy-clipboard.html %} -~~~ rust -{% include {{ page.version.version }}/app/basic-sample.rs %} -~~~ - -### Transaction (with retry logic) - -Next, use the following code to again connect as the `maxroach` user but this time execute a batch of statements as an atomic transaction to transfer funds from one account to another, where all included statements are either committed or aborted. - -Download the txn-sample.rs file, or create the file yourself and copy the code into it. - -{{site.data.alerts.callout_info}}With the default SERIALIZABLE isolation level, CockroachDB may require the client to retry a transaction in case of read/write contention. CockroachDB provides a generic retry function that runs inside a transaction and retries it as needed. You can copy and paste the retry function from here into your code.{{site.data.alerts.end}} - -{% include copy-clipboard.html %} -~~~ rust -{% include {{ page.version.version }}/app/txn-sample.rs %} -~~~ - -After running the code, use the [built-in SQL client](use-the-built-in-sql-client.html) to verify that funds were transferred from one account to another: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -e 'SELECT id, balance FROM accounts' --database=bank -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 900 | -| 2 | 350 | -+----+---------+ -(2 rows) -~~~ - -## What's Next? - -Read more about using the Rust Postgres driver. - -You might also be interested in using a local cluster to explore the following core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/build-an-app-with-cockroachdb.md b/src/current/v1.0/build-an-app-with-cockroachdb.md deleted file mode 100644 index f18c095116f..00000000000 --- a/src/current/v1.0/build-an-app-with-cockroachdb.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Build an App with CockroachDB -summary: The tutorials in this section show you how to build a simple application with CockroachDB, using PostgreSQL-compatible client drivers and ORMs. -tags: golang, python, java -toc: false -twitter: false ---- - -The tutorials in this section show you how to build a simple application with CockroachDB using PostgreSQL-compatible client drivers and ORMs that we have tested and can recommend. - -{{site.data.alerts.callout_info}}If you have problems with a recommended ORM, please open an issue with details. 
If you would like us to support additional ORMs, please let us know here.{{site.data.alerts.end}} - -App Language | Featured Driver | Featured ORM --------------|-----------------|------------- -Go | [pq](build-a-go-app-with-cockroachdb.html) | [GORM](build-a-go-app-with-cockroachdb-gorm.html) -Python | [psycopg2](build-a-python-app-with-cockroachdb.html) | [SQLAlchemy](build-a-python-app-with-cockroachdb-sqlalchemy.html) -Ruby | [pg](build-a-ruby-app-with-cockroachdb.html) | [ActiveRecord](build-a-ruby-app-with-cockroachdb-activerecord.html) -Java | [jdbc](build-a-java-app-with-cockroachdb.html) | [Hibernate](build-a-java-app-with-cockroachdb-hibernate.html) -Node.js | [pg](build-a-nodejs-app-with-cockroachdb.html) | [Sequelize](build-a-nodejs-app-with-cockroachdb-sequelize.html) -C++ | [libpqxx](build-a-c++-app-with-cockroachdb.html) | No ORMs tested -Clojure | [java.jdbc](build-a-clojure-app-with-cockroachdb.html) | No ORMs tested -PHP | [php-pgsql](build-a-php-app-with-cockroachdb.html) | No ORMs tested -Rust | [postgres](build-a-rust-app-with-cockroachdb.html) | No ORMs tested diff --git a/src/current/v1.0/bytes.md b/src/current/v1.0/bytes.md deleted file mode 100644 index 0bca1fed8ea..00000000000 --- a/src/current/v1.0/bytes.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: BYTES -summary: The BYTES data type stores binary strings of variable length. -toc: true ---- - -The `BYTES` [data type](data-types.html) stores binary strings of variable length. - - -## Aliases - -In CockroachDB, the following are aliases for `BYTES`: - -- `BYTEA` -- `BLOB` - -## Syntax - -To express a byte array constant, see the section on -[byte array literals](sql-constants.html#byte-array-literals) for more -details. For example, the following three are equivalent literals for the same -byte array: `b'abc'`, `b'\141\142\143'`, `b'\x61\x62\x63'`. - -In addition to this syntax, CockroachDB also supports using -[string literals](sql-constants.html#string-literals), including the -syntax `'...'`, `e'...'` and `x'....'` in contexts where a byte array -is otherwise expected. - -## Size - -The size of a `BYTES` value is variable, but it's recommended to keep values under 64 kilobytes to ensure performance. Above that threshold, [write amplification](https://en.wikipedia.org/wiki/Write_amplification) and other considerations may cause significant performance degradation. - -## Example - -~~~ sql -> CREATE TABLE bytes (a INT PRIMARY KEY, b BYTES); - -> -- explicitly typed BYTES literals -> INSERT INTO bytes VALUES (1, b'\141\142\143'), (2, b'\x61\x62\x63'), (3, b'\141\x62\c'); - -> -- string literal implicitly typed as BYTES -> INSERT INTO bytes VALUES (4, 'abc'); - - -> SELECT * FROM bytes; -~~~ -~~~ -+---+-----+ -| a | b | -+---+-----+ -| 1 | abc | -| 2 | abc | -| 3 | abc | -| 4 | abc | -+---+-----+ -(4 rows) -~~~ - -## Supported Conversions - -`BYTES` values can be -[cast](data-types.html#data-type-conversions-casts) explicitly to -`STRING`. The conversion verifies that the byte array contains only -valid UTF-8 byte sequences; an error is reported otherwise. - -`STRING` values can be cast explicitly to `BYTES`. This conversion -always succeeds. 
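
These conversions can also be exercised from a client driver. The sketch below uses the psycopg2 driver featured in the app tutorials; the local insecure node, `bank` database, and `maxroach` user are assumptions carried over from those tutorials rather than anything this page requires.

~~~ python
# A hedged sketch, not part of the v1.0 docs: it reuses the bytes table above
# through the psycopg2 driver from the app tutorials and assumes a local
# insecure node with the bank database and maxroach user.
import psycopg2

conn = psycopg2.connect(user='maxroach', host='localhost', port=26257,
                        database='bank')
conn.set_session(autocommit=True)
cur = conn.cursor()

cur.execute("CREATE TABLE IF NOT EXISTS bytes (a INT PRIMARY KEY, b BYTES)")

# The three literal forms are equivalent, as described above.
cur.execute("UPSERT INTO bytes VALUES (1, b'abc'), (2, b'\\141\\142\\143'), "
            "(3, b'\\x61\\x62\\x63')")

# Casting BYTES to STRING succeeds only for valid UTF-8 byte sequences.
cur.execute("SELECT a, CAST(b AS STRING) FROM bytes ORDER BY a")
for a, b in cur.fetchall():
    print(a, b)

cur.close()
conn.close()
~~~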
- -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/check.md b/src/current/v1.0/check.md deleted file mode 100644 index 5b653fc45e5..00000000000 --- a/src/current/v1.0/check.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: CHECK Constraint -summary: The CHECK constraint specifies that values for the column in INSERT or UPDATE statements must satisfy a Boolean expression. -toc: true ---- - -The `CHECK` [constraint](constraints.html) specifies that values for the column in [`INSERT`](insert.html) or [`UPDATE`](update.html) statements must return `TRUE` or `NULL` for a Boolean expression. If any values return `FALSE`, the entire statement is rejected. - - -## Details - -- If you add a `CHECK` constraint to an existing table, existing values are not checked. However, any updates to those values will be. - {{site.data.alerts.callout_info}}In the future we plan to expand the Check constraint to include a check on any existing values in the column.{{site.data.alerts.end}} -- `CHECK` constraints may be specified at the column or table level and can reference other columns within the table. Internally, all column-level `CHECK` constraints are converted to table-level constraints so they can be handled consistently. -- You can have multiple `CHECK` constraints on a single column but ideally, for performance optimization, these should be combined using the logical operators. For example: - - ~~~ sql - warranty_period INT CHECK (warranty_period >= 0) CHECK (warranty_period <= 24) - ~~~ - - should be specified as: - - ~~~ sql - warranty_period INT CHECK (warranty_period BETWEEN 0 AND 24) - ~~~ - -## Syntax - -`CHECK` constraints can be defined at the [table level](#table-level). However, if you only want the constraint to apply to a single column, it can be applied at the [column level](#column-level). - -{{site.data.alerts.callout_info}}You can also add the CHECK constraint to existing tables through ADD CONSTRAINT.{{site.data.alerts.end}} - -### Column Level - -{% include {{ page.version.version }}/sql/diagrams/check_column_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_name` | The name of the constrained column. | -| `column_type` | The constrained column's [data type](data-types.html). | -| `check_expr` | An expression that returns a Boolean value; if the expression evaluates to `FALSE`, the value cannot be inserted.| -| `column_constraints` | Any other column-level [constraints](constraints.html) you want to apply to this column. | -| `column_def` | Definitions for any other columns in the table. | -| `table_constraints` | Any table-level [constraints](constraints.html) you want to apply. | - -**Example** - -~~~ sql -> CREATE TABLE inventories ( - product_id INT NOT NULL, - warehouse_id INT NOT NULL, - quantity_on_hand INT NOT NULL CHECK (quantity_on_hand > 0), - PRIMARY KEY (product_id, warehouse_id) - ); -~~~ - -### Table Level - -{% include {{ page.version.version }}/sql/diagrams/check_table_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_def` | Definitions for any other columns in the table. | -| `name` | The name you want to use for the constraint, which must be unique to its table and follow these [identifier rules](keywords-and-identifiers.html#identifiers). 
| -| `check_expr` | An expression that returns a Boolean value; if the expression evaluates to `FALSE`, the value cannot be inserted.| -| `table_constraints` | Any other table-level [constraints](constraints.html) you want to apply. | - -**Example** - -~~~ sql -> CREATE TABLE inventories ( - product_id INT NOT NULL, - warehouse_id INT NOT NULL, - quantity_on_hand INT NOT NULL, - PRIMARY KEY (product_id, warehouse_id), - CONSTRAINT ok_to_supply CHECK (quantity_on_hand > 0 AND warehouse_id BETWEEN 100 AND 200) - ); -~~~ - -## Usage Example - -`CHECK` constraints may be specified at the column or table level and can reference other columns within the table. Internally, all column-level `CHECK` constraints are converted to table-level constraints so they can be handled in a consistent fashion. - -~~~ sql -> CREATE TABLE inventories ( - product_id INT NOT NULL, - warehouse_id INT NOT NULL, - quantity_on_hand INT NOT NULL CHECK (quantity_on_hand > 0), - PRIMARY KEY (product_id, warehouse_id) - ); - -> INSERT INTO inventories (product_id, warehouse_id, quantity_on_hand) VALUES (1, 2, 0); -~~~ -~~~ -pq: failed to satisfy CHECK constraint (quantity_on_hand > 0) -~~~ - -## See Also - -- [Constraints](constraints.html) -- [`DROP CONSTRAINT`](drop-constraint.html) -- [Default Value constraint](default-value.html) -- [Foreign Key constraint](foreign-key.html) -- [Not Null constraint](not-null.html) -- [Primary Key constraint](primary-key.html) -- [Unique constraint](unique.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) diff --git a/src/current/v1.0/cloud-deployment.md b/src/current/v1.0/cloud-deployment.md deleted file mode 100644 index b1569477196..00000000000 --- a/src/current/v1.0/cloud-deployment.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Cloud Deployment -summary: Learn how to deploy CockroachDB on Google Cloud Platform GCE or AWS. -toc: false ---- - -Use the following guides to deploy CockroachDB to popular cloud platforms: - -- [Amazon Web Services (AWS)](deploy-cockroachdb-on-aws.html) -- [Digital Ocean](deploy-cockroachdb-on-digital-ocean.html) -- [Google Cloud Platform (GCE)](deploy-cockroachdb-on-google-cloud-platform.html) -- [Microsoft Azure](deploy-cockroachdb-on-microsoft-azure.html) - -## General Deployment Steps - -If we do not have a guide for your platform, you can deploy CockroachDB to any cloud environment using the following steps: - -1. Create firewall rules to allow TCP communication on the following ports: - - **26257** (`tcp:26257`) for nodes to join clusters and connect with applications - - **8080** (`tcp:8080`) to expose your Admin UI - -2. Manually deploy CockroachDB using one of the following steps: - - [Secure deployments](manual-deployment.html) - - [Insecure deployments](manual-deployment-insecure.html) *(not recommended for production)* - -## See Also - -- [Manual Deployment](manual-deployment.html) -- [Orchestration](orchestration.html) -- [Monitoring](monitor-cockroachdb-with-prometheus.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/cluster-settings.md b/src/current/v1.0/cluster-settings.md deleted file mode 100644 index f580dc87553..00000000000 --- a/src/current/v1.0/cluster-settings.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Cluster Settings -summary: Learn about cluster settings that apply to all nodes of a CockroachDB cluster. -toc: true ---- - -This page shows you how to view and change CockroachDB's **cluster-wide settings**. 
- -{{site.data.alerts.callout_info}}In contrast to cluster-wide settings, node-level settings apply to a single node. They are defined by flags passed to the cockroach start command when starting a node and cannot be changed without stopping and restarting the node. For more details, see Start a Node.{{site.data.alerts.end}} - - -## Overview - -Cluster settings apply to all nodes of a CockroachDB cluster and control, for example, whether or not to share diagnostic details with Cockroach Labs as well as advanced options for debugging and cluster tuning. - -They can be updated anytime after a cluster has been started, but only by the `root` user. - -## Settings - -{{site.data.alerts.callout_danger}}Many cluster settings are intended for tuning CockroachDB internals. Before changing these settings, we strongly encourage you to discuss your goals with Cockroach Labs; otherwise, you use them at your own risk.{{site.data.alerts.end}} - -The following settings can be configured without further input from Cockroach Labs: - -| Setting | Description | Value type | Default value | -|---------|-------------|---------------|---------------| -| `diagnostics.reporting.enabled` | Enable automatic reporting of usage data to Cockroach Labs. | Boolean | `true` | -| `diagnostics.reporting.interval` | Interval between automatic reports. **Note that increasing this value will also cause memory usage per node to increase, as the reporting data is collected into RAM.** | Interval | 1 hour | -| `diagnostics.reporting.report_metrics` | Enable collection and reporting of diagnostic metrics. Only applicable if `diagnostics.reporting.enabled` is `true`. | Boolean | `true` | -| `diagnostics.reporting.send_crash_reports` | Enable collection and reporting of node crashes. Only applicable if `diagnostics.reporting.enabled` is `true`. | Boolean | `true` | -| `sql.defaults.distsql` | Define whether new client sessions try to [distribute query execution](https://www.cockroachlabs.com/blog/local-and-distributed-processing-in-cockroachdb/) by default. | Integer | 1 (automatic) | -| `sql.metrics.statement_details.enabled` | Collect per-node, per-statement query statistics, visible in the virtual table `crdb_internal.node_statement_statistics`. | Boolean | `true` | -| `sql.metrics.statement_details.dump_to_logs` | On each node, also copy collected per-statement statistics to the [logging output](debug-and-error-logs.html) when automatic reporting is enabled. | Boolean | `false` | -| `sql.metrics.statement_details.threshold` | Only collect per-statement statistics for statements that run longer than this threshold. | Interval | 0 seconds (all statements) | -| `sql.trace.log_statement_execute` | On each node, copy all executed statements to the [logging output](debug-and-error-logs.html). | Boolean | `false` | - - - -## View Current Cluster Settings - -Use the [`SHOW CLUSTER SETTING`](show-cluster-setting.html) statement. - -## Change a Cluster Setting - -Use the [`SET CLUSTER SETTING`](set-cluster-setting.html) statement. - -Before changing a cluster setting, please note the following: - -- Changing a cluster setting is not instantaneous, as the change must be propagated to other nodes in the cluster. - -- It's not recommended to change cluster settings while [upgrading to a new version of CockroachDB](upgrade-cockroach-version.html); wait until all nodes have been upgraded and then make the change. 
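
Both statements can also be issued from a client driver rather than the built-in SQL shell. The sketch below uses the psycopg2 driver featured in the app tutorials and assumes a local insecure node; it connects as `root` because, as noted above, only the `root` user can change cluster settings.

~~~ python
# A hedged sketch, not part of the v1.0 docs: it issues the statements above
# through the psycopg2 driver from the app tutorials. Assumes a local insecure
# node; the root user is required to change cluster settings.
import psycopg2

conn = psycopg2.connect(user='root', host='localhost', port=26257,
                        database='system')
conn.set_session(autocommit=True)
cur = conn.cursor()

# View the current value of a setting from the table above.
cur.execute("SHOW CLUSTER SETTING diagnostics.reporting.enabled")
print(cur.fetchone()[0])

# Change a setting; the new value propagates to the other nodes asynchronously.
cur.execute("SET CLUSTER SETTING diagnostics.reporting.enabled = false")

cur.close()
conn.close()
~~~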
- -## See Also - -- [`SET CLUSTER SETTING`](set-cluster-setting.html) -- [`SHOW CLUSTER SETTING`](show-cluster-setting.html) -- [Diagnostics Reporting](diagnostics-reporting.html) -- [Start a Node](start-a-node.html) -- [Use the Built-in SQL Client](use-the-built-in-sql-client.html) diff --git a/src/current/v1.0/cluster-setup-troubleshooting.md b/src/current/v1.0/cluster-setup-troubleshooting.md deleted file mode 100644 index 47fac5cc303..00000000000 --- a/src/current/v1.0/cluster-setup-troubleshooting.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Cluster & Node Setup Troubleshooting -summary: Learn how to troubleshoot issues with starting CockroachDB clusters -toc: true ---- - -If you're having trouble starting or scaling your cluster, this page will help you troubleshoot the issue. - - -## Before You Begin - -### Terminology - -To use this guide, it's important to understand some of CockroachDB's terminology: - - - A **Cluster** acts as a single logical database, but is actually made up of many cooperating nodes. - - **Nodes** are single instances of the `cockroach` binary running on a machine. It's possible (though atypical) to have multiple nodes running on a single machine. - -### Using This Guide - -To diagnose issues, we recommend beginning with the simplest scenario and then increasing its complexity until you discover the problem. With that strategy in mind, you should proceed through these troubleshooting steps sequentially. - -We also recommend executing these steps in the environment where you want to deploy your CockroachDB cluster. However, if you run into issues you cannot solve, try the same steps in a simpler environment. For example, if you cannot successfully start a cluster using Docker, try deploying CockroachDB in the same environment without using containers. - -## Locate Your Issue - -Proceed through the following steps until you locate the source of the issue with starting or scaling your CockroachDB cluster. - -### 1. Start a Single-Node Cluster - -1. Terminate any running `cockroach` processes and remove any old data: - - ~~~ shell - $ pkill -9 cockroach - $ rm -r testStore - ~~~ - -2. Start a single insecure node and log all activity to your terminal: - - ~~~ shell - $ cockroach start --insecure --logtostderr --store=testStore - ~~~ - - Errors at this stage potentially include: - - CPU incompatibility - - Other services running on port `26257` or `8080` (CockroachDB's default `port` and `http-port` respectively). You can either stop those services or start your node with different ports, specified with the [`--port` and `--http-port`](start-a-node.html#flags). - - If you change the port, you will need to include the `--port=[specified port]` flag in each subsequent `cockroach` command or change the `COCKROACH_PORT` environment variable. - - Networking issues that prevent the node from communicating with itself on its hostname. You can control the hostname CockroachDB uses with the [`--host` flag](start-a-node.html#flags). - - If you change the host, you will need to include `--host=[specified host]` in each subsequent `cockroach` command. - -3. 
If the node appears to have started successfully, open a new terminal window, and attempt to execute the following SQL statement: - - ~~~ shell - $ cockroach sql --insecure -e "SHOW DATABASES" - ~~~ - - You should receive a response that looks similar to this: - - ~~~ - +--------------------+ - |      Database      | - +--------------------+ - | crdb_internal      | - | information_schema | - | pg_catalog         | - | system             | - +--------------------+ - ~~~ - - Errors at this stage potentially include: - - `getsockopt: connection refused`, which indicates you have not included some flag that you used to start the node (e.g., `--port` or `--host`). We have additional troubleshooting steps for this error [here](common-errors.html#getsockopt-connection-refused-error). - - The node crashed. You can identify if this is the case by looking for the `cockroach` process through `ps`. If you cannot locate the `cockroach` process (i.e., it crashed), [file an issue](file-an-issue.html). - -**Next step**: If you successfully completed these steps, try starting a multi-node cluster. - -### 2. Start a Multi-Node Cluster - -1. Terminate any running `cockroach` processes and remove any old data on the additional machines:: - - ~~~ shell - $ pkill -9 cockroach - $ rm -r testStore - ~~~ - - {{site.data.alerts.callout_info}}If you're running all nodes on the same machine, skip this step. Running this command will kill your first node making it impossible to proceed.{{site.data.alerts.end}} - -2. On each machine, start the CockroachDB node, joining it to the first node: - - ~~~ shell - $ cockroach start --insecure --logtostderr --store=testStore \ - --join=[first node's host] - ~~~ - - {{site.data.alerts.callout_info}}If you're running all nodes on the same machine, you will need to change the --port, --http-port, and --store flags. For an example of this, see Start a Local Cluster.{{site.data.alerts.end}} - - Errors at this stage potentially include: - - The same port and host issues from [running a single node](#1-start-a-single-node-cluster). - - [Networking issues](#networking-troubleshooting) - - [Nodes not joining the cluster](#node-will-not-join-cluster) - -3. Visit the Admin UI on any node at `http://[node host]:8080`. All nodes in the cluster should be listed and have data replicated onto them. - - Errors at this stage potentially include: - - [Networking issues](#networking-troubleshooting) - - [Nodes not receiving data](#replication-error-in-a-multi-node-cluster) - -**Next step**: If you successfully completed these steps, try [securing your deployment](manual-deployment.html) (*troubleshooting docs for this coming soon*) or reviewing our other [support resources](support-resources.html). - -## Troubleshooting Information - -Use the information below to resolve issues you encounter when trying to start or scale your cluster. - -### Networking Troubleshooting - -Most networking-related issues are caused by one of two issues: - -- Firewall rules, which require your network administrator to investigate - -- Inaccessible hostnames on your nodes, which can be controlled with the `--host` and `--advertise-host` flags on [`cockroach start`](start-a-node.html#flags) - -However, to efficiently troubleshoot the issue, it's important to understand where and why it's occurring. We recommend checking the following network-related issues: - -- By default, CockroachDB advertises itself to other nodes using its hostname. 
If your environment doesn't support DNS or the hostname is not resolvable, your nodes cannot connect to one another. In these cases, you can: - - Change the hostname each node uses to advertises itself with `--advertise-host` - - Set `--host=[node's IP address]` if the IP is a valid interface on the machine - -- Every node in the cluster should be able to `ping` each other node on the hostnames or IP addresses you use in the `--join`, `--host`, or `--advertise-host` flags. - -- Every node should be able to connect to other nodes on the port you're using for CockroachDB (**26257** by default) through `telnet` or `nc`: - - `telnet [other node host] 26257` - - `nc [other node host] 26257` - -Again, firewalls or hostname issues can cause any of these steps to fail. - -### Node Will Not Join Cluster - -When joining a node to a cluster, you might receive one of the following errors: - -~~~ -no resolvers found; use --join to specify a connected node -~~~ - -~~~ -node belongs to cluster {"cluster hash"} but is attempting to connect to a gossip network for cluster {"another cluster hash"} -~~~ - -**Solution**: Disassociate the node from the existing directory where you've stored CockroachDB data. For example, you can do either of the following: - -- Choose a different directory to store the CockroachDB data: - - ~~~ shell - # Store this node's data in [new directory] - $ cockroach start [flags] --store=[new directory] --join=[cluster host]:26257 - ~~~ - -- Remove the existing directory and start a node joining the cluster again: - - ~~~ shell - # Remove the directory - $ rm -r cockroach-data/ - - # Start a node joining the cluster - $ cockroach start [flags] --join=[cluster host]:26257 - ~~~ - -**Explanation**: When starting a node, the directory you choose to store the data in also contains metadata identifying the cluster the data came from. This causes conflicts when you've already started a node on the server, have quit `cockroach`, and then tried to join another cluster. Because the existing directory's cluster ID doesn't match the new cluster ID, the node cannot join it. - -### Replication Error in a Multi-Node Cluster - -If data is not being replicated to some nodes in the cluster, we recommend checking out the following: - -- Ensure every node but the first was started with the `--join` flag set to the hostname and port of first node (or any other node that's successfully joined the cluster). - - If the flag was not set correctly for a node, shut down the node and restart it with the `--join` flag set correctly. See [Stop a Node](stop-a-node.html) and [Start a Node](start-a-node.html) for more details. - -- Nodes might not be able to communicate on their advertised hostnames, even though they're able to connect. - - You can try to resolve this by [stopping the nodes](stop-a-node.html), and then [restarting them](start-a-node.html) with the `--advertise-host` flag set to an interface all nodes can access. - -- Check the [logs](debug-and-error-logs.html) for each node for further detail, as well as these common errors: - - - `connection refused`: [Troubleshoot your network](#networking-troubleshooting). - - `not connected to cluster` or `node [id] belongs to cluster...`: See [Node Will Not Join Cluster](#node-will-not-join-cluster) on this page. - -## Something Else? 
- -If we do not have a solution here, you can try using our other [support resources](support-resources.html), including: - -- [CockroachDB Community Forum](https://forum.cockroachlabs.com) -- [CockroachDB Community Slack](https://cockroachdb.slack.com) -- [StackOverflow](http://stackoverflow.com/questions/tagged/cockroachdb) -- [CockroachDB Support Portal](https://support.cockroachlabs.com) diff --git a/src/current/v1.0/cockroach-commands.md b/src/current/v1.0/cockroach-commands.md deleted file mode 100644 index 53934d54460..00000000000 --- a/src/current/v1.0/cockroach-commands.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Cockroach Commands -summary: Learn the commands for configuring, starting, and managing a CockroachDB cluster. -toc: true ---- - -This page introduces the `cockroach` commands for configuring, starting, and managing a CockroachDB cluster, as well as logging flags that can be set on any command and environment variables that can be used in place of certain flags. - -You can run `cockroach help` in your shell to get similar guidance. - - -## Commands - -Command | Usage ---------|---- -[`start`](start-a-node.html) | Start a node. -[`cert`](create-security-certificates.html) | Create CA, node, and client certificates. -[`quit`](stop-a-node.html) | Drain and shutdown a node. -[`sql`](use-the-built-in-sql-client.html) | Use the built-in SQL client. -[`user`](create-and-manage-users.html) | Get, set, list, and remove users. -[`zone`](configure-replication-zones.html) | Configure the number and location of replicas for specific sets of data. -[`node`](view-node-details.html) | List node IDs and show their status. -[`dump`](sql-dump.html) | Back up a table by outputting the SQL statements required to recreate the table and all its rows. -[`debug zip`](debug-zip.html) | Generate a `.zip` file that can help Cockroach Labs troubleshoot issues with your cluster. -[`gen`](generate-cockroachdb-resources.html) | Generate manpages, a bash completion file, and example data. -[`version`](view-version-details.html) | Output CockroachDB version and dependency details. - -## Environment Variables - -For many common `cockroach` flags, such as `--port` and `--user`, you can set environment variables once instead of manually passing the flags each time you execute commands. - -- To find out which flags support environment variables, see the documentation for each [command](#commands). -- To output the current configuration of CockroachDB and other environment variables, run `env`. -- When a node uses environment variables on [startup](start-a-node.html), the variable names are printed to the node's logs; however, the variable values are not. - -CockroachDB prioritizes command flags, environment variables, and defaults as follows: - -1. If a flag is set for a command, CockroachDB uses it. -2. If a flag is not set for a command, CockroachDB uses the corresponding environment variable. -3. If neither the flag nor environment variable is set, CockroachDB uses the default for the flag. -4. If there's no flag default, CockroachDB gives an error. diff --git a/src/current/v1.0/cockroachdb-architecture.md b/src/current/v1.0/cockroachdb-architecture.md deleted file mode 100644 index 40782b72539..00000000000 --- a/src/current/v1.0/cockroachdb-architecture.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: CockroachDB Architecture -summary: Explore the architecture of CockroachDB. 
-toc: false -feedback: false ---- - -You can find our architecture guide starting with the [CockroachDB 1.1 release](../v1.1/architecture/overview.html). While some details have changed between the two versions, most of the content remains applicable. diff --git a/src/current/v1.0/cockroachdb-in-comparison.md b/src/current/v1.0/cockroachdb-in-comparison.md deleted file mode 100644 index f105fb7d3db..00000000000 --- a/src/current/v1.0/cockroachdb-in-comparison.md +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: CockroachDB in Comparison -summary: Learn how CockroachDB compares to other popular databases like PostgreSQL, Cassandra, MongoDB, Google Cloud Spanner, and more. -tags: mongodb, mysql, dynamodb -toc: false -comparison: true ---- - -This page shows you how key features of CockroachDB stack up against other databases. Hover over features for their intended meanings, and click CockroachDB answers to view related documentation. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Feature | CockroachDB
---------|------------
-Automated Scaling | Yes
-Automated Failover | Yes
-Automated Repair | Yes
-Strongly Consistent Replication | Yes
-Consensus-Based Replication | Yes
-Distributed Transactions | Yes
-ACID Semantics | Yes
-Eventually Consistent Reads | No
-SQL | Yes
-Open Source | Yes
-Commercial Version | Optional
-Support | Full
- - diff --git a/src/current/v1.0/collate.md b/src/current/v1.0/collate.md deleted file mode 100644 index c2aade174eb..00000000000 --- a/src/current/v1.0/collate.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: COLLATE -summary: The COLLATE feature lets you sort strings according to language- and country-specific rules. -toc: true ---- - -The `COLLATE` feature lets you sort [`STRING`](string.html) values according to language- and country-specific rules, known as collations. - -Collated strings are important because different languages have [different rules for alphabetic order](https://en.wikipedia.org/wiki/Alphabetical_order#Language-specific_conventions), especially with respect to accented letters. For example, in German accented letters are sorted with their unaccented counterparts, while in Swedish they are placed at the end of the alphabet. A collation is a set of rules used for ordering and usually corresponds to a language, though some languages have multiple collations with different rules for sorting; for example Portuguese has separate collations for Brazilian and European dialects (`pt-BR` and `pt-PT` respectively). - - -## Details - -- Operations on collated strings cannot involve strings with a different collation or strings with no collation. However, it is possible to add or overwrite a collation on the fly. - -- Only use the collation feature when you need to sort strings by a specific collation. We recommend this because every time a collated string is constructed or loaded into memory, CockroachDB computes its collation key, whose size is linear in relationship to the length of the collated string, which requires additional resources. - -- Collated strings can be considerably larger than the corresponding uncollated strings, depending on the language and the string content. For example, strings containing the character `é` produce larger collation keys in the French locale than in Chinese. - -- Collated strings that are indexed require additional disk space as compared to uncollated strings. In case of indexed collated strings, collation keys must be stored in addition to the strings from which they are derived, creating a constant factor overhead. - -## Supported Collations - -CockroachDB supports the collations provided by Go's [language package](https://godoc.org/golang.org/x/text/language#Tag). The `` argument is the BCP 47 language tag at the end of each line, immediately preceded by `//`. For example, Afrikaans is supported as the `af` collation. - -## SQL Syntax - -Collated strings are used as normal strings in SQL, but have a `COLLATE` clause appended to them. - -- **Column syntax**: `STRING COLLATE `. For example: - - ~~~ sql - > CREATE TABLE foo (a STRING COLLATE en PRIMARY KEY); - ~~~ - - {{site.data.alerts.callout_info}}You can also use any of the aliases for STRING.{{site.data.alerts.end}} - -- **Value syntax**: ` COLLATE `. For example: - - ~~~ sql - > INSERT INTO foo VALUES ('dog' COLLATE en); - ~~~ - -## Examples - -### Specify Collation for a Column - -You can set a default collation for all values in a `STRING` column. 
- -For example, you can set a column's default collation to German (`de`): - -~~~ sql -> CREATE TABLE de_names (name STRING COLLATE de PRIMARY KEY); -~~~ - -When inserting values into this column, you must specify the collation for every value: - -~~~ sql -> INSERT INTO de_names VALUES ('Backhaus' COLLATE de), ('Bär' COLLATE de), ('Baz' COLLATE de); -~~~ - -The sort will now honor the `de` collation that treats *ä* as *a* in alphabetic sorting: - -~~~ sql -> SELECT * FROM de_names ORDER BY name; -~~~ -~~~ -+----------+ -| name | -+----------+ -| Backhaus | -| Bär | -| Baz | -+----------+ -~~~ - -### Order by Non-Default Collation - -You can sort a column using a specific collation instead of its default. - -For example, you receive different results if you order results by German (`de`) and Swedish (`sv`) collations: - -~~~ sql -> SELECT * FROM de_names ORDER BY name COLLATE sv; -~~~ -~~~ -+----------+ -| name | -+----------+ -| Backhaus | -| Baz | -| Bär | -+----------+ -~~~ - -### Ad-Hoc Collation Casting - -You can cast any string into a collation on the fly. - -~~~ sql -> SELECT 'A' COLLATE de < 'Ä' COLLATE de; -~~~ -~~~ -true -~~~ - -However, you cannot compare values with different collations: - -~~~ sql -SELECT 'Ä' COLLATE sv < 'Ä' COLLATE de; -~~~ -~~~ -pq: unsupported comparison operator: < -~~~ - -You can also use casting to remove collations from values. - -~~~ sql -> SELECT CAST(name AS STRING) FROM de_names ORDER BY name; -~~~ - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/column-families.md b/src/current/v1.0/column-families.md deleted file mode 100644 index b8b0ab67681..00000000000 --- a/src/current/v1.0/column-families.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Column Families -summary: A column family is a group of columns in a table that are stored as a single key-value pair in the underlying key-value store. -toc: true ---- - -A column family is a group of columns in a table that are stored as a single key-value pair in the underlying key-value store. Column families reduce the number of keys stored in the key-value store, resulting in improved performance during [`INSERT`](insert.html), [`UPDATE`](update.html), and [`DELETE`](delete.html) operations. - -This page explains how CockroachDB organizes columns into families as well as cases in which you might want to manually override the default behavior. - -{{site.data.alerts.callout_info}} -[Secondary indexes](indexes.html) do not respect column families. All secondary indexes store values in a single column family. -{{site.data.alerts.end}} - -## Default Behavior - -When a table is created, all columns are stored as a single column family. - -This default approach ensures efficient key-value storage and performance in most cases. However, when frequently updated columns are grouped with seldom updated columns, the seldom updated columns are nonetheless rewritten on every update. Especially when the seldom updated columns are large, it's more performant to split them into a distinct family. - -## Manual Override - -### Assign Column Families on Table Creation - -To manually assign a column family on [table creation](create-table.html), use the `FAMILY` keyword. - -For example, let's say we want to create a table to store an immutable blob of data (`data BYTES`) with a last accessed timestamp (`last_accessed TIMESTAMP`). 
Because we know that the blob of data will never get updated, we use the `FAMILY` keyword to break it into a separate column family: - -~~~ sql -> CREATE TABLE test ( - id INT PRIMARY KEY, - last_accessed TIMESTAMP, - data BYTES, - FAMILY f1 (id, last_accessed), - FAMILY f2 (data) -); - -> SHOW CREATE TABLE users; -~~~ - -~~~ -+-------+---------------------------------------------+ -| Table | CreateTable | -+-------+---------------------------------------------+ -| test | CREATE TABLE test ( | -| | id INT NOT NULL, | -| | last_accessed TIMESTAMP NULL, | -| | data BYTES NULL, | -| | CONSTRAINT "primary" PRIMARY KEY (id), | -| | FAMILY f1 (id, last_accessed), | -| | FAMILY f2 (data) | -| | ) | -+-------+---------------------------------------------+ -(1 row) -~~~ - -{{site.data.alerts.callout_info}}Columns that are part of the primary index are always assigned to the first column family. If you manually assign primary index columns to a family, it must therefore be the first family listed in the CREATE TABLE statement.{{site.data.alerts.end}} - -### Assign Column Families When Adding Columns - -When using the [`ALTER TABLE .. ADD COLUMN`](add-column.html) statement to add a column to a table, you can assign the column to a new or existing column family. - -- Use the `CREATE FAMILY` keyword to assign a new column to a **new family**. For example, the following would add a `data2 BYTES` column to the `test` table above and assign it to a new column family: - - ~~~ sql - > ALTER TABLE test ADD COLUMN data2 BYTES CREATE FAMILY f3; - ~~~ - -- Use the `FAMILY` keyword to assign a new column to an **existing family**. For example, the following would add a `name STRING` column to the `test` table above and assign it to family `f1`: - - ~~~ sql - > ALTER TABLE test ADD COLUMN name STRING FAMILY f1; - ~~~ - -- Use the `CREATE IF NOT EXISTS FAMILY` keyword to assign a new column to an **existing family or, if the family doesn't exist, to a new family**. For example, the following would assign the new column to the existing `f1` family; if that family didn't exist, it would create a new family and assign the column to it: - - ~~~ sql - > ALTER TABLE test ADD COLUMN name STRING CREATE IF NOT EXISTS FAMILY f1; - ~~~ - -## Compatibility with Past Releases - -Using the `beta-20160714` release makes your data incompatible with versions earlier than the `beta-20160629` release. - -## See Also - -- [`CREATE TABLE`](create-table.html) -- [`ADD COLUMN`](add-column.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/commit-transaction.md b/src/current/v1.0/commit-transaction.md deleted file mode 100644 index 24a267258c5..00000000000 --- a/src/current/v1.0/commit-transaction.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: COMMIT -summary: Commit a transaction with the COMMIT statement in CockroachDB. -toc: true ---- - -The `COMMIT` [statement](sql-statements.html) commits the current [transaction](transactions.html) or, when using [client-side transaction retries](transactions.html#client-side-transaction-retries), clears the connection to allow new transactions to begin. - -When using [client-side transaction retries](transactions.html#client-side-transaction-retries), statements issued after [`SAVEPOINT cockroach_restart`](savepoint.html) are committed when [`RELEASE SAVEPOINT cockroach_restart`](release-savepoint.html) is issued instead of `COMMIT`. However, you must still issue a `COMMIT` statement to clear the connection for the next transaction. 
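
In application code, this flow is usually wrapped in a retry loop like the generic retry function the app tutorials mention. The sketch below shows one way to write that loop with the Python psycopg2 driver featured in those tutorials; it is not the tutorials' `txn-sample.py`, and treating SQLSTATE `40001` as the retryable error code, along with the local insecure node, `bank` database, and `maxroach` user, are assumptions.

~~~ python
# A hedged sketch of the client-side retry flow described above, using the
# psycopg2 driver from the app tutorials (not the tutorials' txn-sample.py).
# Treating SQLSTATE 40001 as the retryable error code is an assumption.
import psycopg2
import psycopg2.errorcodes

def run_transaction(conn, op):
    with conn.cursor() as cur:
        cur.execute("BEGIN")
        cur.execute("SAVEPOINT cockroach_restart")
        while True:
            try:
                op(cur)
                # The transaction's statements are committed here...
                cur.execute("RELEASE SAVEPOINT cockroach_restart")
                break
            except psycopg2.OperationalError as e:
                if e.pgcode != psycopg2.errorcodes.SERIALIZATION_FAILURE:
                    raise
                # Retryable contention error: rewind and run op again.
                cur.execute("ROLLBACK TO SAVEPOINT cockroach_restart")
        # ...while COMMIT only clears the connection for the next transaction.
        cur.execute("COMMIT")

def transfer(cur):
    cur.execute("UPDATE accounts SET balance = balance - 100 WHERE id = 1")
    cur.execute("UPDATE accounts SET balance = balance + 100 WHERE id = 2")

# Assumes the local insecure cluster, bank database, and maxroach user from
# the app tutorials.
conn = psycopg2.connect(user='maxroach', host='localhost', port=26257,
                        database='bank')
conn.set_session(autocommit=True)
run_transaction(conn, transfer)
conn.close()
~~~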
- -For non-retryable transactions, if statements in the transaction [generated any errors](transactions.html#error-handling), `COMMIT` is equivalent to `ROLLBACK`, which aborts the transaction and discards *all* updates made by its statements. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/commit_transaction.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to commit a transaction. However, privileges are required for each statement within a transaction. - -## Aliases - -In CockroachDB, `END` is an alias for the `COMMIT` statement. - -## Example - -### Commit a Transaction - -How you commit transactions depends on how your application handles [transaction retries](transactions.html#transaction-retries). - -#### Client-Side Retryable Transactions - -When using [client-side transaction retries](transactions.html#client-side-transaction-retries), statements are committed by [`RELEASE SAVEPOINT cockroach_restart`](release-savepoint.html). `COMMIT` itself only clears the connection for the next transaction. - -~~~ sql -> BEGIN; - -> SAVEPOINT cockroach_restart; - -> UPDATE products SET inventory = 0 WHERE sku = '8675309'; - -> INSERT INTO orders (customer, sku, status) VALUES (1001, '8675309', 'new'); - -> RELEASE SAVEPOINT cockroach_restart; - -> COMMIT; -~~~ - -{{site.data.alerts.callout_danger}}This example assumes you're using client-side intervention to handle transaction retries.{{site.data.alerts.end}} - -#### Automatically Retried Transactions - -If you are using transactions that CockroachDB will [automatically retry](transactions.html#automatic-retries) (i.e., all statements sent in a single batch), commit the transaction with `COMMIT`. - -~~~ sql -> BEGIN; UPDATE products SET inventory = 100 WHERE = '8675309'; UPDATE products SET inventory = 100 WHERE = '8675310'; COMMIT; -~~~ - -## See Also - -- [Transactions](transactions.html) -- [`BEGIN`](begin-transaction.html) -- [`RELEASE SAVEPOINT`](release-savepoint.html) -- [`ROLLBACK`](rollback-transaction.html) -- [`SAVEPOINT`](savepoint.html) diff --git a/src/current/v1.0/common-errors.md b/src/current/v1.0/common-errors.md deleted file mode 100644 index 3ffd0c058aa..00000000000 --- a/src/current/v1.0/common-errors.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Common Errors -summary: Learn how to troubleshoot issues with a single-node cluster -toc: true ---- - -This page helps with general troubleshooting steps that apply to many different scenarios, such as not being able to connect to a node. - -{{site.data.alerts.callout_info}}If you cannot find what you're looking for, we also have more detailed troubleshooting steps for specific issues.{{site.data.alerts.end}} - - -## Common Troubleshooting Steps - -If you run into issues with CockroachDB, there are a few steps you can always take: - -- Check your [logs](debug-and-error-logs.html) for errors related to your issue. Logs are generated on a per-node basis, so you must either identify the node where the issue occurred or [collect the logs from all active nodes in your cluster](debug-zip.html). - -- [Stop](stop-a-node.html) and [restart](start-a-node.html) problematic nodes with the `--logtostderr` flag. This option prints logs to your terminal through `stderr`, letting you see all of your cluster's activities as they occur. 
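For example, you might stop a problematic node and restart it with logging to `stderr` as in the following sketch. The address and store path are placeholders for your own values, and this assumes the `cockroach quit` command described in the page on stopping a node; adjust the security flags to match your cluster:

~~~ shell
# Stop the problematic node (replace the placeholder address):
$ cockroach quit --insecure --host=<node address>

# Restart it with logs printed to stderr, adding any other flags
# you normally start this node with (e.g., --join):
$ cockroach start --insecure --host=<node address> --store=path=<store path> --logtostderr
~~~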
- -## Common Errors - -### `getsockopt: connection refused` Error - -This error indicates that the `cockroach` binary is either not running or is not listening on the interfaces (i.e., hostname or port) you specified. - -To resolve this issue, you must do one of the following: - -- [Start your CockroachDB node](start-a-node.html). -- If you specified a `--host` flag when starting your node, you must include it with all other [`cockroach` commands](cockroach-commands.html). -- If you specified a `--port` flag when starting your node, you must include it with all other [`cockroach` commands](cockroach-commands.html) or change the `COCKROACH_PORT` environment variable. - -If you're not sure what the `--host` and `--port` values might have been, you can end the `cockroach` process, and then restart the node: - -~~~ shell -$ pkill cockroach -$ cockroach start [flags] -~~~ - -### Replication Error in a Single-Node Cluster - -When running a single-node CockroachDB cluster, an error about replicas failing will eventually show up in the node's log files, for example: - -~~~ shell -E160407 09:53:50.337328 storage/queue.go:511 [replicate] 7 replicas failing with "0 of 1 store with an attribute matching []; likely not enough nodes in cluster" -~~~ - -This error occurs because CockroachDB expects three nodes by default. If you do not intend to add additional nodes, you can stop this error by updating your default zone configuration to expect only one node: - -~~~ shell -# Insecure cluster: -$ cockroach zone set .default --insecure --disable-replication - -# Secure cluster: -$ cockroach zone set .default --certs-dir=[path to certs directory] --disable-replication -~~~ - -The `--disable-replication` flag automatically reduces the zone's replica count to 1, but you can do this manually as well: - -~~~ shell -# Insecure cluster: -$ echo 'num_replicas: 1' | cockroach zone set .default --insecure -f - - -# Secure cluster: -$ echo 'num_replicas: 1' | cockroach zone set .default --certs-dir=[path to certs directory] -f - -~~~ - -See [Configure Replication Zones](configure-replication-zones.html) for more details. - -## Something Else? - -If we do not have a solution here, you can try using our other [support resources](support-resources.html), including: - -- [CockroachDB Community Forum](https://forum.cockroachlabs.com) -- [CockroachDB Community Slack](https://cockroachdb.slack.com) -- [StackOverflow](http://stackoverflow.com/questions/tagged/cockroachdb) -- [CockroachDB Support Portal](https://support.cockroachlabs.com) diff --git a/src/current/v1.0/configure-replication-zones.md b/src/current/v1.0/configure-replication-zones.md deleted file mode 100644 index a966c8d7a93..00000000000 --- a/src/current/v1.0/configure-replication-zones.md +++ /dev/null @@ -1,588 +0,0 @@ ---- -title: Configure Replication Zones -summary: In CockroachDB, you use replication zones to control the number and location of replicas for specific sets of data. -keywords: ttl, time to live, availability zone -toc: true ---- - -In CockroachDB, you use **replication zones** to control the number and location of replicas for specific sets of data, both when replicas are first added and when they are rebalanced to maintain cluster equilibrium. Initially, there is a single, default replication zone for the entire cluster. You can adjust this default zone as well as add zones for individual databases and tables as needed. 
For example, you might use the default zone to replicate most data in a cluster normally within a single datacenter, while creating a specific zone to more highly replicate a certain database or table across multiple datacenters and geographies. - -This page explains how replication zones work and how to use the `cockroach zone` [command](cockroach-commands.html) to configure them. - -{{site.data.alerts.callout_info}}Currently, only the root user can configure replication zones.{{site.data.alerts.end}} - - -## Overview - -### Replication Zone Levels - -There are three replication zone levels: - -- **Cluster:** CockroachDB comes with a single, default replication zone for the entire cluster. See [View the Default Replication Zone](#view-the-default-replication-zone) and [Edit the Default Replication Zone](#edit-the-default-replication-zone) for more details. -- **Database:** You can add replication zones for specific databases. See [Create a Replication Zone for a Database](#create-a-replication-zone-for-a-database) for more details. -- **Table:** You can add replication zones for specific tables. See [Create a Replication Zone for a Table](#create-a-replication-zone-for-a-table) for more details. - -When replicating a piece of data, CockroachDB uses the most granular zone available: If there's a replication zone for the table containing the data, CockroachDB uses it; otherwise, it uses the replication zone for the database containing the data. If there's no applicable table or database replication zone, CockroachDB uses the cluster-wide replication zone. - -In addition to the databases and tables that are visible via SQL, CockroachDB stores additional internal data in what are called system ranges. You can configure replication zones for parts of these internal data ranges if you'd like to override the cluster-wide settings. See [Create a Replication Zone for System Ranges](#create-a-replication-zone-for-system-ranges) for more details. - -### Replication Zone Format - -A replication zone is specified in [YAML](https://en.wikipedia.org/wiki/YAML) format and looks like this: - -~~~ yaml -range_min_bytes: -range_max_bytes: -gc: - ttlseconds: -num_replicas: -constraints: [comma-separated constraint list] -~~~ - -Field | Description -------|------------ -`range_min_bytes` | Not yet implemented. -`range_max_bytes` | The maximum size, in bytes, for a range of data in the zone. When a range reaches this size, CockroachDB will split it into two ranges.

**Default:** `67108864` (64MiB) -`ttlseconds` | The number of seconds overwritten values will be retained before garbage collection. Smaller values can save disk space if values are frequently overwritten; larger values increase the range allowed for `AS OF SYSTEM TIME` queries, also known as [Time Travel Queries](select.html#select-historical-data-time-travel).

It is not recommended to set this below `600` (10 minutes); doing so will cause problems for long-running queries. Also, since all versions of a row are stored in a single range that never splits, it is not recommended to set this so high that all the changes to a row in that time period could add up to more than 64MiB; such oversized ranges could cause the server to run out of memory or experience other problems.

**Default:** `86400` (24 hours) -`num_replicas` | The number of replicas in the zone.

**Default:** `3` -`constraints` | A comma-separated list of required and/or prohibited constraints influencing the location of replicas. See [Constraints in Replication Zones](#constraints-in-replication-zones) for more details.

**Default:** No constraints, with CockroachDB locating each replica on a unique node and attempting to spread replicas evenly across localities. - -### Replication Constraints - -The location of replicas, both when they are first added and when they are rebalanced to maintain cluster equilibrium, is based on the interplay between descriptive attributes assigned to nodes and constraints set in zone configurations. - -{{site.data.alerts.callout_success}}For demonstrations of how to set node attributes and replication constraints in different scenarios, see Scenario-based Examples below.{{site.data.alerts.end}} - -#### Descriptive Attributes Assigned to Nodes - -When starting a node with the [`cockroach start`](start-a-node.html) command, you can assign the following types of descriptive attributes: - -Attribute Type | Description ----------------|------------ -**Node Locality** | Using the `--locality` flag, you can assign arbitrary key-value pairs that describe the locality of the node. Locality might include country, region, datacenter, rack, etc. CockroachDB attempts to spread replicas evenly across the cluster based on locality.

The key-value pairs should be ordered from most inclusive to least inclusive. For instance, a country locality should be specified before datacenter, which should in turn be specified before rack. Also, the keys and the order of key-value pairs must be the same on all nodes, and it's typically better to include more pairs than fewer. For example:

`--locality=region=east,datacenter=us-east-1`
`--locality=region=east,datacenter=us-east-2`
`--locality=region=west,datacenter=us-west-1` -**Node Capability** | Using the `--attrs` flag, you can specify node capability, which might include specialized hardware or number of cores, for example:

`--attrs=ram:64gb` -**Store Type/Capability** | Using the `attrs` field of the `--store` flag, you can specify disk type or capability, for example:

`--store=path=/mnt/ssd01,attrs=ssd`
`--store=path=/mnt/hda1,attrs=hdd:7200rpm` - -#### Constraints in Replication Zones - -The node-level and store-level descriptive attributes mentioned above can be used as the following types of constraints in replication zones to influence the location of replicas. However, note the following general guidance: - -- When locality is the only consideration for replication, it's recommended to set locality on nodes without specifying any constraints in zone configurations. In the absence of constraints, CockroachDB attempts to spread replicas evenly across the cluster based on locality. -- Required and prohibited constraints are useful in special situations where, for example, data must or must not be stored in a specific country or on a specific type of machine. - -Constraint Type | Description | Syntax -----------------|-------------|------- -**Required** | When placing replicas, the cluster will consider only nodes/stores with matching attributes. When there are no matching nodes/stores with capacity, new replicas will not be added. | `[+ssd]` -**Prohibited** | When placing replicas, the cluster will ignore nodes/stores with matching attributes. When there are no alternate nodes/stores with capacity, new replicas will not be added. | `[-ssd]` - -### Node/Replica Recommendations - -See [Cluster Topography](recommended-production-settings.html#cluster-topology) recommendations for production deployments. - -## Subcommands - -Subcommand | Usage ------------|------ -`ls` | List all replication zones. -`get` | View the YAML contents of a replication zone. -`set` | Create or edit a replication zone. -`rm` | Remove a replication zone. - -## Synopsis - -~~~ shell -# List all replication zones: -$ cockroach zone ls - -# View the default replication zone for the cluster: -$ cockroach zone get .default - -# View the replication zone for a database: -$ cockroach zone get - -# View the replication zone for a table: -$ cockroach zone get - -# Edit the default replication zone for the cluster: -$ cockroach zone set .default --file= - -# Create/edit the replication zone for a database: -$ cockroach zone set --file= - -# Create/edit the replication zone for a table: -$ cockroach zone set --file= - -# Remove the replication zone for a database: -$ cockroach zone rm - -# Remove the replication zone for a table: -$ cockroach zone rm - -# View help: -$ cockroach zone --help -$ cockroach zone ls --help -$ cockroach zone get --help -$ cockroach zone set --help -$ cockroach zone rm --help -~~~ - -## Flags - -The `zone` command and subcommands support the following [general-use](#general) and [logging](#logging) flags. - -### General - -Flag | Description ------|------------ -`--certs-dir` | The path to the [certificate directory](create-security-certificates.html). The directory must contain valid certificates if running in secure mode.

**Env Variable:** `COCKROACH_CERTS_DIR`
**Default:** `${HOME}/.cockroach-certs/` -`--database`
`-d` | Not currently implemented. -`--disable-replication` | Disable replication in the zone by setting the zone's replica count to 1. This is equivalent to setting `num_replicas: 1`. -`--file`
`-f` | The path to the [YAML file](#replication-zone-format) defining the zone configuration. To pass the zone configuration via the standard input, set this flag to `-`.

This flag is relevant only for the `set` subcommand. -`--host` | The server host to connect to. This can be the address of any node in the cluster.

**Env Variable:** `COCKROACH_HOST`
**Default:** `localhost` -`--insecure` | Run in insecure mode. If this flag is not set, the `--certs-dir` flag must point to valid certificates.

**Env Variable:** `COCKROACH_INSECURE`
**Default:** `false` -`--port`
`-p` | The server port to connect to.

**Env Variable:** `COCKROACH_PORT`
**Default:** `26257` -`--url` | The connection URL. If you use this flag, do not set any other connection flags.

For insecure connections, the URL format is:
`--url=postgresql://@:/?sslmode=disable`

For secure connections, the URL format is:
`--url=postgresql://@:/`
with the following parameters in the query string:
`sslcert=`
`sslkey=`
`sslmode=verify-full`
`sslrootcert=`

**Env Variable:** `COCKROACH_URL` -`--user`
`-u` | The user connecting to the database. Currently, only the `root` user can configure replication zones.

**Env Variable:** `COCKROACH_USER`
**Default:** `root` - -### Logging - -By default, the `zone` command logs errors to `stderr`. - -If you need to troubleshoot this command's behavior, you can change its [logging behavior](debug-and-error-logs.html). - -## Basic Examples - -These examples focus on the basic approach and syntax for working with zone configuration. For examples demonstrating how to use constraints, see [Scenario-based Examples](#scenario-based-examples). - -### View the Default Replication Zone - -The cluster-wide replication zone (`.default`) is initially set to replicate data to any three nodes in your cluster, with ranges in each replica splitting once they get larger than 67108864 bytes. - -To view the default replication zone, use the `cockroach zone get .default` command with appropriate flags: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach zone get .default --insecure -~~~ - -~~~ -.default -range_min_bytes: 1048576 -range_max_bytes: 67108864 -gc: - ttlseconds: 86400 -num_replicas: 3 -constraints: [] -~~~ - -### Edit the Default Replication Zone - -To edit the default replication zone, create a YAML file defining only the values you want to change (other values will not be affected), and use the `cockroach zone set .default -f ` command with appropriate flags: - -{% include copy-clipboard.html %} -~~~ shell -$ cat default_update.yaml -~~~ - -~~~ -num_replicas: 5 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach zone set .default --insecure -f default_update.yaml -~~~ - -~~~ -range_min_bytes: 1048576 -range_max_bytes: 67108864 -gc: - ttlseconds: 86400 -num_replicas: 5 -constraints: [] -~~~ - -Alternately, you can pass the YAML content via the standard input: - -{% include copy-clipboard.html %} -~~~ shell -$ echo 'num_replicas: 5' | cockroach zone set .default --insecure -f - -~~~ - -### Create a Replication Zone for a Database - -To control replication for a specific database, create a YAML file defining only the values you want to change (other values will not be affected), and use the `cockroach zone set -f ` command with appropriate flags: - -{% include copy-clipboard.html %} -~~~ shell -$ cat database_zone.yaml -~~~ - -~~~ -num_replicas: 7 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach zone set db1 --insecure -f database_zone.yaml -~~~ - -~~~ -range_min_bytes: 1048576 -range_max_bytes: 67108864 -gc: - ttlseconds: 86400 -num_replicas: 5 -constraints: [] -~~~ - -Alternately, you can pass the YAML content via the standard input: - -{% include copy-clipboard.html %} -~~~ shell -$ echo 'num_replicas: 5' | cockroach zone set db1 --insecure -f - -~~~ - -### Create a Replication Zone for a Table - -To control replication for a specific table, create a YAML file defining only the values you want to change (other values will not be affected), and use the `cockroach zone set -f ` command with appropriate flags: - -{% include copy-clipboard.html %} -~~~ shell -$ cat table_zone.yaml -~~~ - -~~~ -num_replicas: 7 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach zone set db1.t1 --insecure -f table_zone.yaml -~~~ - -~~~ -range_min_bytes: 1048576 -range_max_bytes: 67108864 -gc: - ttlseconds: 86400 -num_replicas: 7 -constraints: [] -~~~ - -Alternately, you can pass the YAML content via the standard input: - -{% include copy-clipboard.html %} -~~~ shell -$ echo 'num_replicas: 7' | cockroach zone set db1.t1 --insecure -f - -~~~ - -### Create a Replication Zone for System Ranges - -In addition to the databases and tables that are visible via the SQL 
interface, CockroachDB stores additional data in what are called system ranges. There are three categories of system ranges for which replication zones can be set: - -Zone Name | Description -----------|------------ -**.meta** | The "meta" ranges contain the authoritative information about the location of all data in the cluster. If your cluster is running in multiple datacenters, it's a best practice to configure the meta ranges to have a copy in each datacenter. -**.system** | The ".system" zone config controls the replication of a variety of important internal data, including information needed to allocate new table IDs and track the health of a cluster's nodes. -**.timeseries** | The "timeseries" ranges contain monitoring data about the cluster that powers the graphs in CockroachDB's admin UI. - -To control replication for one of the above sets of system ranges, create a YAML file defining only the values you want to change (other values will not be affected), and use the `cockroach zone set -f ` command with appropriate flags: - -{% include copy-clipboard.html %} -~~~ shell -$ cat meta_zone.yaml -~~~ - -~~~ -num_replicas: 7 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach zone set .meta --insecure -f meta_zone.yaml -~~~ - -~~~ -range_min_bytes: 1048576 -range_max_bytes: 67108864 -gc: - ttlseconds: 86400 -num_replicas: 7 -constraints: [] -~~~ - -Alternately, you can pass the YAML content via the standard input: - -{% include copy-clipboard.html %} -~~~ shell -$ echo 'num_replicas: 7' | cockroach zone set .meta --insecure -f - -~~~ - -## Scenario-based Examples - -### Even Replication Across Datacenters - -**Scenario:** - -- You have 6 nodes across 3 datacenters, 2 nodes in each datacenter. -- You want data replicated 3 times, with replicas balanced evenly across all three datacenters. - -**Approach:** - -Start each node with its datacenter location specified in the `--locality` flag: - -~~~ shell -# Start the two nodes in datacenter 1: -$ cockroach start --insecure --host= --locality=datacenter=us-1 -$ cockroach start --insecure --host= --locality=datacenter=us-1 \ ---join=:26257 - -# Start the two nodes in datacenter 2: -$ cockroach start --insecure --host= --locality=datacenter=us-2 \ ---join=:26257 -$ cockroach start --insecure --host= --locality=datacenter=us-2 \ ---join=:26257 - -# Start the two nodes in datacenter 3: -$ cockroach start --insecure --host= --locality=datacenter=us-3 \ ---join=:26257 -$ cockroach start --insecure --host= --locality=datacenter=us-3 \ ---join=:26257 -~~~ - -There's no need to make zone configuration changes; by default, the cluster is configured to replicate data three times, and even without explicit constraints, the cluster will aim to diversify replicas across node localities. - -### Multiple Applications Writing to Different Databases - -**Scenario:** - -- You have 2 independent applications connected to the same CockroachDB cluster, each application using a distinct database. -- You have 6 nodes across 2 datacenters, 3 nodes in each datacenter. -- You want the data for application 1 to be replicated 5 times, with replicas evenly balanced across both datacenters. -- You want the data for application 2 to be replicated 3 times, with all replicas in a single datacenter. - -**Approach:** - -1. 
Start each node with its datacenter location specified in the `--locality` flag: - - ~~~ shell - # Start the three nodes in datacenter 1: - $ cockroach start --insecure --host= --locality=datacenter=us-1 - $ cockroach start --insecure --host= --locality=datacenter=us-1 \ - --join=:26257 - $ cockroach start --insecure --host= --locality=datacenter=us-1 \ - --join=:26257 - - # Start the three nodes in datacenter 2: - $ cockroach start --insecure --host= --locality=datacenter=us-2 \ - --join=:26257 - $ cockroach start --insecure --host= --locality=datacenter=us-2 \ - --join=:26257 - $ cockroach start --insecure --host= --locality=datacenter=us-2 \ - --join=:26257 - ~~~ - -2. On any node, configure a replication zone for the database used by application 1: - - {% include copy-clipboard.html %} - ~~~ shell - # Create a YAML file with the replica count set to 5: - $ cat app1_zone.yaml - ~~~ - - ~~~ - num_replicas: 5 - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Apply the replication zone to the database used by application 1: - $ cockroach zone set app1_db --insecure -f app1_zone.yaml - ~~~ - - ~~~ - range_min_bytes: 1048576 - range_max_bytes: 67108864 - gc: - ttlseconds: 86400 - num_replicas: 5 - constraints: [] - ~~~ - Nothing else is necessary for application 1's data. Since all nodes specify their datacenter locality, the cluster will aim to balance the data in the database used by application 1 between datacenters 1 and 2. - -3. On any node, configure a replication zone for the database used by application 2: - - {% include copy-clipboard.html %} - ~~~ shell - # Create a YAML file with 1 datacenter as a required constraint: - $ cat app2_zone.yaml - ~~~ - - ~~~ - constraints: [+datacenter=us-2] - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Apply the replication zone to the database used by application 2: - $ cockroach zone set app2_db --insecure -f app2_zone.yaml - ~~~ - - ~~~ - range_min_bytes: 1048576 - range_max_bytes: 67108864 - gc: - ttlseconds: 86400 - num_replicas: 3 - constraints: [+datacenter=us-2] - ~~~ - The required constraint will force application 2's data to be replicated only within the `us-2` datacenter. - -### Stricter Replication for a Specific Table - -**Scenario:** - -- You have 7 nodes, 5 with SSD drives and 2 with HDD drives. -- You want data replicated 3 times by default. -- Speed and availability are important for a specific table that is queried very frequently, however, so you want the data in that table to be replicated 5 times, preferably on nodes with SSD drives. - -**Approach:** - -1. Start each node with `ssd` or `hdd` specified as store attributes: - - ~~~ shell - # Start the 5 nodes with SSD storage: - $ cockroach start --insecure --host= --store=path=node1,attrs=ssd - $ cockroach start --insecure --host= --store=path=node2,attrs=ssd \ - --join=:26257 - $ cockroach start --insecure --host= --store=path=node3,attrs=ssd \ - --join=:26257 - $ cockroach start --insecure --host= --store=path=node4,attrs=ssd \ - --join=:26257 - $ cockroach start --insecure --host= --store=path=node5,attrs=ssd \ - --join=:26257 - - # Start the 2 nodes with HDD storage: - $ cockroach start --insecure --host= --store=path=node6,attrs=hdd \ - --join=:26257 - $ cockroach start --insecure --host= --store=path=node2,attrs=hdd \ - --join=:26257 - ~~~ - -2. 
On any node, configure a replication zone for the table that must be replicated more strictly: - - {% include copy-clipboard.html %} - ~~~ shell - # Create a YAML file with the replica count set to 5 - # and the ssd attribute as a required constraint: - $ cat table_zone.yaml - ~~~ - - ~~~ - num_replicas: 5 - constraints: [+ssd] - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Apply the replication zone to the table: - $ cockroach zone set db.important_table --insecure -f table_zone.yaml - ~~~ - - ~~~ - range_min_bytes: 1048576 - range_max_bytes: 67108864 - gc: - ttlseconds: 86400 - num_replicas: 5 - constraints: [+ssd] - ~~~ - Data in the table will be replicated 5 times, and the required constraint will place data in the table on nodes with `ssd` drives. - -### Tweaking the Replication of System Ranges - -**Scenario:** - -- You have nodes spread across 7 datacenters. -- You want data replicated 5 times by default. -- For better performance, you want a copy of the meta ranges in all of the datacenters. -- To save disk space, you only want the internal timeseries data replicated 3 times by default. - -**Approach:** - -1. Start each node with a different locality attribute: - - ~~~ shell - $ cockroach start --insecure --host= --locality=datacenter=us-1 - $ cockroach start --insecure --host= --locality=datacenter=us-2 \ - --join=:26257 - $ cockroach start --insecure --host= --locality=datacenter=us-3 \ - --join=:26257 - $ cockroach start --insecure --host= --locality=datacenter=us-4 \ - --join=:26257 - $ cockroach start --insecure --host= --locality=datacenter=us-5 \ - --join=:26257 - $ cockroach start --insecure --host= --locality=datacenter=us-6 \ - --join=:26257 - $ cockroach start --insecure --host= --locality=datacenter=us-7 \ - --join=:26257 - ~~~ - -2. On any node, configure the default replication zone: - - {% include copy-clipboard.html %} - ~~~ shell - echo 'num_replicas: 5' | cockroach zone set .default --insecure -f - - ~~~ - - ~~~ - range_min_bytes: 1048576 - range_max_bytes: 67108864 - gc: - ttlseconds: 86400 - num_replicas: 5 - constraints: [] - ~~~ - - All data in the cluster will be replicated 5 times, including both SQL data and the internal system data. - -3. On any node, configure the `.meta` replication zone: - - {% include copy-clipboard.html %} - ~~~ shell - echo 'num_replicas: 7' | cockroach zone set .meta --insecure -f - - ~~~ - - ~~~ - range_min_bytes: 1048576 - range_max_bytes: 67108864 - gc: - ttlseconds: 86400 - num_replicas: 7 - constraints: [] - ~~~ - - The `.meta` addressing ranges will be replicated such that one copy is in all 7 datacenters, while all other data will be replicated 5 times. - -4. On any node, configure the `.timeseries` replication zone: - - {% include copy-clipboard.html %} - ~~~ shell - echo 'num_replicas: 3' | cockroach zone set .timeseries --insecure -f - - ~~~ - - ~~~ - range_min_bytes: 1048576 - range_max_bytes: 67108864 - gc: - ttlseconds: 86400 - num_replicas: 7 - constraints: [] - ~~~ - - The timeseries data will only be replicated 3 times without affecting the configuration of all other data. - -## See Also - -[Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/constraints.md b/src/current/v1.0/constraints.md deleted file mode 100644 index 9a6392d832d..00000000000 --- a/src/current/v1.0/constraints.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Constraints -summary: Constraints offer additional data integrity by enforcing conditions on the data within a column. 
-toc: true ---- - -Constraints offer additional data integrity by enforcing conditions on the data within a column. Whenever values are manipulated (inserted, deleted, or updated), constraints are checked and modifications that violate constraints are rejected. - -For example, the Unique constraint requires that all values in a column be unique from one another (except *NULL* values). If you attempt to write a duplicate value, the constraint rejects the entire statement. - - -## Supported Constraints - -| Constraint | Description | -|------------|-------------| -| [Check](check.html) | Values must return `TRUE` or `NULL` for a Boolean expression. | -| [Default Value](default-value.html) | If a value is not defined for the constrained column in an `INSERT` statement, the Default Value is written to the column. | -| [Foreign Keys](foreign-key.html) | Values must exactly match existing values from the column it references. | -| [Not Null](not-null.html) | Values may not be *NULL*. | -| [Primary Key](primary-key.html) | Values must uniquely identify each row *(one per table)*. This behaves as if the Not Null and Unique constraints are applied, as well as automatically creates an [index](indexes.html) for the table using the constrained columns. | -| [Unique](unique.html) | Each non-*NULL* value must be unique. This also automatically creates an [index](indexes.html) for the table using the constrained columns. | - -## Using Constraints - -### Add Constraints - -How you add constraints depends on the number of columns you want to constrain, as well as whether or not the table is new. - -- **One column of a new table** has its constraints defined after the column's data type. For example, this statement applies the Primary Key constraint to `foo.a`: - - ``` sql - > CREATE TABLE foo (a INT PRIMARY KEY); - ``` -- **Multiple columns of a new table** have their constraints defined after the table's columns. For example, this statement applies the Primary Key constraint to `foo`'s columns `a` and `b`: - - ``` sql - > CREATE TABLE bar (a INT, b INT, PRIMARY KEY (a,b)); - ``` - - {{site.data.alerts.callout_info}}The Default Value and Not Null constraints cannot be applied to multiple columns.{{site.data.alerts.end}} - -- **Existing tables** can have the following constraints added: - - **Check**, **Foreign Key**, and **Unique** constraints can be added through [`ALTER TABLE...ADD CONSTRAINT`](add-constraint.html). For example, this statement adds the Unique constraint to `baz.id`: - - ~~~ sql - > ALTER TABLE baz ADD CONSTRAINT id_unique UNIQUE (id); - ~~~ - - - **Default Values** can be added through [`ALTER TABLE...ALTER COLUMN`](alter-column.html#set-or-change-a-default-value). For example, this statement adds the Default Value constraint to `baz.bool`: - - ~~~ sql - > ALTER TABLE baz ALTER COLUMN bool SET DEFAULT true; - ~~~ - - - **Primary Key** and **Not Null** constraints cannot be added or changed. However, you can go through [this process](#table-migrations-to-add-or-change-immutable-constraints) to migrate data from your current table to a new table with the constraints you want to apply. - -#### Order of Constraints - -The order in which you list constraints is not important because constraints are applied to every modification of their respective tables or columns. 
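As a small illustration (the table and column names are examples only), the following statements define equivalent tables; the column-level constraints are simply written in a different order:

~~~ sql
> CREATE TABLE inventories_a (sku STRING PRIMARY KEY, quantity INT NOT NULL CHECK (quantity >= 0));

> CREATE TABLE inventories_b (sku STRING PRIMARY KEY, quantity INT CHECK (quantity >= 0) NOT NULL);
~~~

In both cases, every `INSERT` or `UPDATE` touching `quantity` is checked against the same Not Null and Check constraints.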
- -#### Name Constraints on New Tables - -You can name constraints applied to new tables using the `CONSTRAINT` clause before defining the constraint: - -``` sql -> CREATE TABLE foo (a INT CONSTRAINT another_name PRIMARY KEY); - -> CREATE TABLE bar (a INT, b INT, CONSTRAINT yet_another_name PRIMARY KEY (a,b)); -``` - -### View Constraints - -To view a table's constraints, use [`SHOW CONSTRAINTS`](show-constraints.html) or [`SHOW CREATE TABLE`](show-create-table.html). - -### Remove Constraints - -The procedure for removing a constraint depends on its type: - -| Constraint Type | Procedure | -|-----------------|-----------| -| [Check](check.html) | Use [`DROP CONSTRAINT`](drop-constraint.html) | -| [Default Value](default-value.html) | Use [`ALTER COLUMN`](alter-column.html#remove-default-constraint) | -| [Foreign Keys](foreign-key.html) | Use [`DROP CONSTRAINT`](drop-constraint.html) | -| [Not Null](not-null.html) | Use [`ALTER COLUMN`](alter-column.html#remove-not-null-constraint) | -| [Primary Key](primary-key.html) | Primary Keys cannot be removed. However, you can move the table's data to a new table with [this process](#table-migrations-to-add-or-change-immutable-constraints). | -| [Unique](unique.html) | The Unique constraint cannot be dropped directly. However, you can use [`DROP INDEX`](drop-index.html) to remove the index automatically created by the Unique constraint (whose name ends in `_key`) to remove the constraint. | - -### Change Constraints - -The procedure for changing a constraint depends on its type: - -| Constraint Type | Procedure | -|-----------------|-----------| -| [Check](check.html) | [Issue a transaction](transactions.html#syntax) that adds a new Check constraint ([`ADD CONSTRAINT`](add-constraint.html)), and then remove the existing one ([`DROP CONSTRAINT`](drop-constraint.html)). | -| [Default Value](default-value.html) | The Default Value can be changed through [`ALTER COLUMN`](alter-column.html). | -| [Foreign Keys](foreign-key.html) | [Issue a transaction](transactions.html#syntax) that adds a new Foreign Key constraint ([`ADD CONSTRAINT`](add-constraint.html)), and then remove the existing one ([`DROP CONSTRAINT`](drop-constraint.html)). | -| [Not Null](not-null.html) | The Not Null constraint cannot be changed, only removed. However, you can move the table's data to a new table with [this process](#table-migrations-to-add-or-change-immutable-constraints). | -| [Primary Key](primary-key.html) | Primary Keys cannot be modified. However, you can move the table's data to a new table with [this process](#table-migrations-to-add-or-change-immutable-constraints). | -| [Unique](unique.html) | [Issue a transaction](transactions.html#syntax) that adds a new Unique constraint ([`ADD CONSTRAINT`](add-constraint.html)), and then remove the existing one ([`DROP CONSTRAINT`](drop-constraint.html)). | - -#### Table Migrations to Add or Change Immutable Constraints - -If you want to make a change to an immutable constraint, you can use the following process: - -1. [Create a new table](create-table.html) with the constraints you want to apply. -2. Move the data from the old table to the new one using [`INSERT` from a `SELECT` statement](insert.html#insert-from-a-select-statement). -3. [Drop the old table](drop-table.html), and then [rename the new table to the old name](rename-table.html). This cannot be done transactionally. 
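As a sketch of that process, suppose you want to add a Not Null constraint to a column in a hypothetical `users` table (the table, columns, and constraint here are illustrative only):

~~~ sql
> CREATE TABLE users_new (id INT PRIMARY KEY, email STRING NOT NULL);

> INSERT INTO users_new (id, email) SELECT id, email FROM users;

> DROP TABLE users;

> ALTER TABLE users_new RENAME TO users;
~~~

Because the final two statements cannot run inside a single transaction, plan for a brief window during which the table is unavailable under its original name.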
- -## See Also - -- [`CREATE TABLE`](create-table.html) -- [`ADD CONSTRAINT`](add-constraint.html) -- [`DROP CONSTRAINT`](drop-constraint.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) -- [`SHOW CREATE TABLE`](show-create-table.html) diff --git a/src/current/v1.0/create-and-manage-users.md b/src/current/v1.0/create-and-manage-users.md deleted file mode 100644 index 1cfe46d9a48..00000000000 --- a/src/current/v1.0/create-and-manage-users.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: Create & Manage Users -summary: To create and manage your cluster's users (which lets you control SQL-level privileges), use the cockroach user command with appropriate flags. -toc: true ---- - -To create, manage, and remove your cluster's users (which lets you control SQL-level [privileges](privileges.html)), use the `cockroach user` [command](cockroach-commands.html) with appropriate flags. - -{{site.data.alerts.callout_success}}You can also use the CREATE USER statement to create users.{{site.data.alerts.end}} - - -## Considerations - -- Usernames are case-insensitive; must start with either a letter or underscore; must contain only letters, numbers, or underscores; and must be between 1 and 63 characters. -- After creating users, you must [grant them privileges to databases and tables](grant.html). -- On secure clusters, you must [create client certificates for users](create-security-certificates.html#create-the-certificate-and-key-pair-for-a-client) and users must [authenticate their access to the cluster](#user-authentication). -- {% include {{ page.version.version }}/misc/remove-user-callout.html %} - -## Subcommands - -Subcommand | Usage ------------|------ -`get` | Retrieve a table containing a user and their hashed password. -`ls` | List all users. -`rm` | Remove a user. -`set` | Create or update a user. - -## Synopsis - -~~~ shell -# Create a user: -$ cockroach user set - -# List all users: -$ cockroach user ls - -# Display a specific user: -$ cockroach user get - -# View help: -$ cockroach user --help -$ cockroach user get --help -$ cockroach user ls --help -$ cockroach user rm --help -$ cockroach user set --help -~~~ - -## Flags - -The `user` command and subcommands support the following [general-use](#general) and [logging](#logging) flags. - -### General - -Flag | Description ------|------------ -`--certs-dir` | The path to the [certificate directory](create-security-certificates.html). The directory must contain valid certificates if running in secure mode.

**Env Variable:** `COCKROACH_CERTS_DIR`
**Default:** `${HOME}/.cockroach-certs/` -`-d`, `--database` | _Deprecated_: Users are created for the entire cluster. However, you can control a user's privileges per database when [granting them privileges](grant.html#grant-privileges-on-databases).

**Env Variable:** `COCKROACH_DATABASE` -`--host` | The server host to connect to. This can be the address of any node in the cluster.

**Env Variable:** `COCKROACH_HOST`
**Default:** `localhost` -`--insecure` | Run in insecure mode. If this flag is not set, the `--certs-dir` flag must point to valid certificates.

**Env Variable:** `COCKROACH_INSECURE`
**Default:** `false` -`--password` | Enable password authentication for the user; you will be prompted to enter the password on the command line.

You cannot set a password for the `root` user.

[Find more detail about how CockroachDB handles passwords](#user-authentication). -`-p`, `--port` | Connect to the cluster on the specified port.

**Env Variable:** `COCKROACH_PORT`
**Default**: `26257` -`--pretty` | Format table rows printed to the standard output using ASCII art and disable escaping of special characters.

When disabled with `--pretty=false`, or when the standard output is not a terminal, table rows are printed as tab-separated values, and special characters are escaped. This makes the output easy to parse by other programs.

**Default:** `true` when output is a terminal, `false` otherwise -`--url` | Connect to the cluster on the provided URL, e.g., `postgresql://myuser@localhost:26257/mydb`. If left blank, the connection flags are used (`host`, `port`, `user`, `database`, `insecure`, `certs`).

**Env Variable:** `COCKROACH_URL` -`-u`, `--user` | _Deprecated_: Only the `root` user can create users, so you cannot pass any other usernames into this flag.

**Env Variable:** `COCKROACH_USER`
**Default**: `root` - -### Logging - -By default, the `user` command logs errors to `stderr`. - -If you need to troubleshoot this command's behavior, you can change its [logging behavior](debug-and-error-logs.html). - -## User Authentication - -Secure clusters require users to authenticate their access to databases and tables. CockroachDB offers two methods for this: - -- [Client certificate and key authentication](#secure-clusters-with-client-certificates), which is available to all users. To ensure the highest level of security, we recommend only using client certificate and key authentication. -- [Password authentication](#secure-clusters-with-passwords), which is available only to users who you've created passwords for. To set a password for a user, include the `--password` flag in the `cockroach user set` command. However, you *cannot* add password authentication to the `root` user.

You can use this password to authenticate users without supplying their client certificate and key; however, we recommend instead using client certificate and key authentication whenever possible. - -{{site.data.alerts.callout_info}}Insecure clusters do not support user authentication, but you can still create passwords for users (besides root) through the --password flag.{{site.data.alerts.end}} - -## Examples - -### Create a User - -#### Insecure Cluster - -~~~ shell -$ cockroach user set jpointsman --insecure -~~~ - -Usernames are case-insensitive; must start with either a letter or underscore; must contain only letters, numbers, or underscores; and must be between 1 and 63 characters. - -After creating users, you must [grant them privileges to databases](grant.html). - -#### Secure Cluster - -~~~ shell -$ cockroach user set jpointsman --certs-dir=certs -~~~ - -{{site.data.alerts.callout_success}}If you want to allow password authentication for the user, include the --password flag and then enter and confirm the password at the command prompt.{{site.data.alerts.end}} - -Usernames are case-insensitive; must start with either a letter or underscore; must contain only letters, numbers, or underscores; and must be between 1 and 63 characters. - -After creating users, you must: - -- [Create their client certificates](create-security-certificates.html#create-the-certificate-and-key-pair-for-a-client). -- [Grant them privileges to databases](grant.html). - -### Authenticate as a Specific User - -#### Insecure Clusters - -~~~ shell -$ cockroach sql --insecure --user=jpointsman -~~~ - -#### Secure Clusters with Client Certificates - -All users can authenticate their access to a secure cluster using [a client certificate](create-security-certificates.html#create-the-certificate-and-key-pair-for-a-client) issued to their username. - -~~~ shell -$ cockroach sql --certs-dir=certs --user=jpointsman -~~~ - -#### Secure Clusters with Passwords - -[Users with passwords](create-and-manage-users.html#secure-cluster) can authenticate their access by entering their password at the command prompt instead of using their client certificate and key. - -If we cannot find client certificate and key files matching the user, we fall back on password authentication. - -~~~ shell -$ cockroach sql --certs-dir=certs --user=jpointsman -~~~ - -### Update a User's Password - -~~~ shell -$ cockroach user set jpointsman --certs-dir=certs --password -~~~ - -After issuing this command, enter and confirm the user's new password at the command prompt. 
- -{{site.data.alerts.callout_danger}}You cannot add password authentication to the root user.{{site.data.alerts.end}} - -### List All Users - -~~~ shell -$ cockroach user ls --insecure -~~~ -~~~ -+------------+ -| username | -+------------+ -| jpointsman | -+------------+ -~~~ - -### Find a Specific User - -~~~ shell -$ cockroach user get jpointsman --insecure -~~~ -~~~ -+------------+--------------------------------------------------------------+ -| username | hashedPassword | -+------------+--------------------------------------------------------------+ -| jpointsman | $2a$108tm5lYjES9RSXSKtQFLhNO.e/ysTXCBIRe7XeTgBrR6ubXfp6dDczS | -+------------+--------------------------------------------------------------+ -~~~ - -### Remove a User - -~~~ shell -$ cockroach user rm jpointsman --insecure -~~~ - -## See Also - -- [Create Security Certificates](create-security-certificates.html) -- [`GRANT`](grant.html) -- [`SHOW GRANTS`](show-grants.html) -- [`CREATE USER`](create-user.html) -- [Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/create-database.md b/src/current/v1.0/create-database.md deleted file mode 100644 index 4403fabe1b5..00000000000 --- a/src/current/v1.0/create-database.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: CREATE DATABASE -summary: The CREATE DATABASE statement creates a new CockroachDB database. -toc: true ---- - -The `CREATE DATABASE` [statement](sql-statements.html) creates a new CockroachDB database. - - -## Required Privileges - -Only the `root` user can create databases. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/create_database.html %} - -## Parameters - -Parameter | Description -----------|------------ -`IF NOT EXISTS` | Create a new database only if a database of the same name does not already exist; if one does exist, do not return an error. -`name` | The name of the database to create, which [must be unique](#create-fails-name-already-in-use) and follow these [identifier rules](keywords-and-identifiers.html#identifiers). -`encoding` | The `CREATE DATABASE` statement accepts an optional `ENCODING` clause for compatibility with PostgreSQL, but `UTF-8` is the only supported encoding. The aliases `UTF8` and `UNICODE` are also accepted. Values should be enclosed in single quotes and are case insensitive. Example: `CREATE DATABASE bank ENCODING = 'UTF-8'`. - -## Example - - -### Create a Database -~~~ sql -> CREATE DATABASE bank; - -> SHOW DATABASES; -~~~ -~~~ -+----------+ -| Database | -+----------+ -| bank | -| system | -+----------+ -~~~ - - -### Create Fails (Name Already In Use) - -~~~ sql -> SHOW DATABASES; -~~~ -~~~ -+----------+ -| Database | -+----------+ -| bank | -| system | -+----------+ -~~~ -~~~ sql -> CREATE DATABASE bank; -~~~ -~~~ -pq: database "bank" already exists -~~~ -~~~ sql -> SHOW DATABASES; -+----------+ -| Database | -+----------+ -| bank | -| system | -+----------+ -~~~ - -~~~ sql -> CREATE DATABASE IF NOT EXISTS bank; -~~~ - -SQL does not generate an error, but instead responds `CREATE DATABASE` even though a new database wasn't created. 
- -~~~ sql -> SHOW DATABASES; -~~~ - -~~~ -+----------+ -| Database | -+----------+ -| bank | -| system | -+----------+ -~~~ - -## See Also - -- [`SHOW DATABASES`](show-databases.html) -- [`RENAME DATABASE`](rename-database.html) -- [`SET DATABASE`](set-vars.html) -- [`DROP DATABASE`](drop-database.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/create-index.md b/src/current/v1.0/create-index.md deleted file mode 100644 index 77cc180357b..00000000000 --- a/src/current/v1.0/create-index.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: CREATE INDEX -summary: The CREATE INDEX statement creates an index for a table. Indexes improve your database's performance by helping SQL quickly locate data. -toc: true ---- - -The `CREATE INDEX` [statement](sql-statements.html) creates an index for a table. [Indexes](indexes.html) improve your database's performance by helping SQL locate data without having to look through every row of a table. - -{{site.data.alerts.callout_info}}Indexes are automatically created for a table's PRIMARY KEY and UNIQUE columns.

When querying a table, CockroachDB uses the fastest index. For more information about that process, see Index Selection in CockroachDB.{{site.data.alerts.end}} - - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the table. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/create_index.html %} - -## Parameters - - - -| Parameter | Description | -|-----------|-------------| -|`UNIQUE` | Apply the [Unique constraint](unique.html) to the indexed columns.

This causes the system to check for existing duplicate values on index creation. It also applies the Unique constraint at the table level, so the system checks for duplicate values when inserting or updating data.| -|`IF NOT EXISTS` | Create a new index only if an index of the same name does not already exist; if one does exist, do not return an error.| -|`index_name` | The [`name`](sql-grammar.html#name) of the index to create, which must be unique to its table and follow these [identifier rules](keywords-and-identifiers.html#identifiers).

If you do not specify a name, CockroachDB uses the format `<table>_<columns>_key/idx`. `key` indicates the index applies the Unique constraint; `idx` indicates it does not. Example: `accounts_balance_idx`| -|`table_name` | The [`qualified_name`](sql-grammar.html#qualified_name) of the table you want to create the index on. | -|`column_name` | The name of the column you want to index.| -|`ASC` or `DESC`| Sort the column in ascending (`ASC`) or descending (`DESC`) order in the index. How columns are sorted affects query results, particularly when using `LIMIT`.

__Default:__ `ASC`| -|`STORING ...`| Store (but do not sort) each column whose name you include.

For information on when to use `STORING`, see [Store Columns](#store-columns).

`COVERING` aliases `STORING` and works identically. - -## Examples - -### Create Indexes - -To create the most efficient indexes, we recommend reviewing: - -- [Indexes: Best Practices](indexes.html#best-practices) -- [Index Selection in CockroachDB](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/) - -#### Single-Column Indexes - -Single-column indexes sort the values of a single column. - -~~~ sql -> CREATE INDEX ON products (price); -~~~ - -Because each query can only use one index, single-column indexes are not typically as useful as multiple-column indexes. - -#### Multiple-Column Indexes - -Multiple-column indexes sort columns in the order you list them. - -~~~ sql -> CREATE INDEX ON products (price, stock); -~~~ - -To create the most useful multiple-column indexes, we recommend reviewing our [best practices](indexes.html#indexing-columns). - -#### Unique Indexes - -Unique indexes do not allow duplicate values among their columns. - -~~~ sql -> CREATE UNIQUE INDEX ON products (name, manufacturer_id); -~~~ - -This also applies the [Unique constraint](unique.html) at the table level, similarly to [`ALTER TABLE`](alter-table.html). The above example is equivalent to: - -~~~ sql -> ALTER TABLE products ADD CONSTRAINT products_name_manufacturer_id_key UNIQUE (name, manufacturer_id); -~~~ - -### Store Columns - -Storing a column improves the performance of queries that retrieve (but don’t filter) its values. - -~~~ sql -> CREATE INDEX ON products (price) STORING (name); -~~~ - -However, to use stored columns, queries must filter another column in the same index. For example, SQL can retrieve `name` values from the above index only when a query's `WHERE` clause filters `price`. - -### Change Column Sort Order - -To sort columns in descending order, you must explicitly set the option when creating the index. (Ascending order is the default.) - -~~~ sql -> CREATE INDEX ON products (price DESC, stock); -~~~ - -How columns are sorted impacts the order of rows returned by queries using the index, which particularly affects queries using `LIMIT`. - -### Query Specific Indexes - -Normally, CockroachDB selects the index that it calculates will scan the fewest rows. However, you can override that selection and specify the name of the index you want to use. To find the name, use [`SHOW INDEX`](show-index.html). 
- -~~~ sql -> SHOW INDEX FROM products; -~~~ -~~~ -+----------+--------------------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+----------+--------------------+--------+-----+--------+-----------+---------+----------+ -| products | primary | true | 1 | id | ASC | false | false | -| products | products_price_idx | false | 1 | price | ASC | false | false | -| products | products_price_idx | false | 2 | id | ASC | false | true | -+----------+--------------------+--------+-----+--------+-----------+---------+----------+ -(3 rows) -~~~ -~~~ sql -> SELECT name FROM products@products_price_idx WHERE price > 10; -~~~ - -## See Also - -- [Indexes](indexes.html) -- [`SHOW INDEX`](show-index.html) -- [`DROP INDEX`](drop-index.html) -- [`RENAME INDEX`](rename-index.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/create-security-certificates.md b/src/current/v1.0/create-security-certificates.md deleted file mode 100644 index 87a902e3524..00000000000 --- a/src/current/v1.0/create-security-certificates.md +++ /dev/null @@ -1,272 +0,0 @@ ---- -title: Create Security Certificates -summary: A secure CockroachDB cluster uses TLS for encrypted inter-node and client-node communication and requires CA, node, and client certificates and keys. -toc: true ---- - -A secure CockroachDB cluster uses [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) for encrypted inter-node and client-node communication and requires CA, node, and client certificates and keys. To create these certificates and keys, use the `cockroach cert` [command](cockroach-commands.html) with the appropriate subcommands and flags. - -When using `cockroach cert` to create node and client certificates, you will need access to a local copy of the CA certificate and key. It is therefore recommended to create all certificates and keys in one place and then distribute node and client certificates and keys appropriately. For the CA key, be sure to store it somewhere safe and keep a backup; if you lose it, you will not be able to add new nodes or clients to your cluster. For a walkthrough of this process, see [Manual Deployment](manual-deployment.html). - - -## Subcommands - -Subcommand | Usage ------------|------ -`create-ca` | Create the self-signed certificate authority (CA), which you'll use to create and authenticate certificates for your entire cluster. -`create-node` | Create a certificate and key for a specific node in the cluster. You specify all addresses at which the node can be reached and pass appropriate flags. -`create-client` | Create a certificate and key for a [specific user](create-and-manage-users.html) accessing the cluster from a client. You specify the username of the user who will use the certificate and pass appropriate flags. -`list` | List certificates and keys found in the certificate directory. 
- -## Certificate Directory - -The `create-*` subcommands generate the CA certificate and all node and client certificates and keys in a single directory specified by the `--certs-dir` flag, with the files named as follows: - -File name pattern | File usage --------------|------------ -`ca.crt` | CA certificate -`node.crt` | Server certificate -`node.key` | Key for server certificate -`client..crt` | Client certificate for `` (eg: `client.root.crt` for user `root`) -`client..key` | Key for the client certificate - -Note the following: - -- The CA key is never loaded automatically by `cockroach` commands, so it should be created in a separate directory, identified by the `--ca-key` flag. - -- Keys (files ending in `.key`) must not have group or world permissions (maximum permissions are 0700, or `rwx------`). This check can be disabled by setting the environment variable `COCKROACH_SKIP_KEY_PERMISSION_CHECK=true`. - -## Synopsis - -~~~ shell -# Create the CA certificate and key: -$ cockroach cert create-ca \ - --certs-dir=[path-to-certs-directory] \ - --ca-key=[path-to-ca-key] - -# Create a node certificate and key, specifying all addresses at which the node can be reached: -$ cockroach cert create-node \ - [node-hostname] \ - [node-other-hostname] \ - [node-yet-another-hostname] \ - --certs-dir=[path-to-certs-directory] \ - --ca-key=[path-to-ca-key] - -# Create a client certificate and key: -$ cockroach cert create-client \ - [username] \ - --certs-dir=[path-to-certs-directory] \ - --ca-key=[path-to-ca-key] - -# List certificates and keys: -$ cockroach cert list \ - --certs-dir=[path-to-certs-directory] - -# View help: -$ cockroach cert --help -$ cockroach cert create-ca --help -$ cockroach cert create-node --help -$ cockroach cert create-client --help -$ cockroach cert list --help -~~~ - -## Flags - -The `cert` command and subcommands support the following [general-use](#general) and [logging](#logging) flags. - -### General - -Flag | Description ------|----------- -`--certs-dir` | The path to the [certificate directory](#certificate-directory) containing all certificates and keys needed by `cockroach` commands.

This flag is used by all subcommands.

**Default:** `${HOME}/.cockroach-certs/` -`--ca-key` | The path to the private key protecting the CA certificate.

This flag is required for all `create-*` subcommands. When used with `create-ca` in particular, it defines where to create the CA key; the specified directory must exist.

**Env Variable:** `COCKROACH_CA_KEY` -`--allow-ca-key-reuse` | When running the `create-ca` subcommand, pass this flag to re-use an existing CA key identified by `--ca-key`. Otherwise, a new CA key will be generated.

This flag is used only by the `create-ca` subcommand. It helps avoid accidentally re-using an existing CA key. -`--overwrite` | When running `create-*` subcommands, pass this flag to allow existing files in the certificate directory (`--certs-dir`) to be overwritten.

This flag helps avoid accidentally overwriting sensitive certificates and keys. -`--lifetime` | The lifetime of the certificate, in hours, minutes, and seconds.

Certificates are valid from the time they are created through the duration specified in `--lifetime`.

**Default:** `87840h0m0s` (10 years) -`--key-size` | The size of the CA, node, or client key, in bits.

**Default:** `2048` - -### Logging - -By default, the `cert` command logs errors to `stderr`. - -If you need to troubleshoot this command's behavior, you can change its [logging behavior](debug-and-error-logs.html). - -## Examples - -### Create the CA certificate and key pair - -1. Create two directories: - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir certs - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir my-safe-directory - ~~~ - - `certs`: You'll generate your CA certificate and all node and client certificates and keys in this directory and then upload some of the files to your nodes. - - `my-safe-directory`: You'll generate your CA key in this directory and then reference the key when generating node and client certificates. After that, you'll keep the key safe and secret; you will not upload it to your nodes. - -2. Generate the CA certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-ca \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ ls -l certs - ~~~ - - ~~~ - total 8 - -rw-r--r-- 1 maxroach maxroach 1.1K Jul 10 14:12 ca.crt - ~~~ - -### Create the certificate and key pairs for nodes - -1. Generate the certificate and key for the first node: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - node1.example.com \ - node1.another-example.com \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ ls -l certs - ~~~ - - ~~~ - total 24 - -rw-r--r-- 1 maxroach maxroach 1.1K Jul 10 14:12 ca.crt - -rw-r--r-- 1 maxroach maxroach 1.2K Jul 10 14:16 node.crt - -rw------- 1 maxroach maxroach 1.6K Jul 10 14:16 node.key - ~~~ - -2. Upload certificates to the first node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -3. Delete the local copy of the first node's certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ rm certs/node.crt certs/node.key - ~~~ - - {{site.data.alerts.callout_info}}This is necessary because the certificates and keys for additional nodes will also be named node.crt and node.key As an alternative to deleting these files, you can run the next cockroach cert create-node commands with the --overwrite flag.{{site.data.alerts.end}} - -4. Create the certificate and key for the second node: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - node2.example.com \ - node2.another-example.com \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ ls -l certs - ~~~ - - ~~~ - total 24 - -rw-r--r-- 1 maxroach maxroach 1.1K Jul 10 14:12 ca.crt - -rw-r--r-- 1 maxroach maxroach 1.2K Jul 10 14:17 node.crt - -rw------- 1 maxroach maxroach 1.6K Jul 10 14:17 node.key - ~~~ - -5. Upload certificates to the second node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -6. Repeat steps 3 - 5 for each additional node. 
- -### Create the certificate and key pair for a client - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach cert create-client \ -maxroach \ ---certs-dir=certs \ ---ca-key=my-safe-directory/ca.key -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ ls -l certs -~~~ - -~~~ -total 40 --rw-r--r-- 1 maxroach maxroach 1.1K Jul 10 14:12 ca.crt --rw-r--r-- 1 maxroach maxroach 1.1K Jul 10 14:13 client.maxroach.crt --rw------- 1 maxroach maxroach 1.6K Jul 10 14:13 client.maxroach.key --rw-r--r-- 1 maxroach maxroach 1.2K Jul 10 14:17 node.crt --rw------- 1 maxroach maxroach 1.6K Jul 10 14:17 node.key -~~~ - -### List certificates and keys - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach cert list \ ---certs-dir=certs -~~~ - -~~~ -Certificate directory: certs -+-----------------------+---------------------+---------------------+------------+--------------------------------------------------------+-------+ -| Usage | Certificate File | Key File | Expires | Notes | Error | -+-----------------------+---------------------+---------------------+------------+--------------------------------------------------------+-------+ -| Certificate Authority | ca.crt | | 2027/07/18 | num certs: 1 | | -| Node | node.crt | node.key | 2022/07/14 | addresses: node2.example.com,node2.another-example.com | | -| Client | client.maxroach.crt | client.maxroach.key | 2022/07/14 | user: maxroach | | -+-----------------------+---------------------+---------------------+------------+--------------------------------------------------------+-------+ -(3 rows) -~~~ - -## See Also - -- [Manual Deployment](manual-deployment.html): Walkthrough starting a multi-node secure cluster and accessing it from a client. -- [Start a Node](start-a-node.html): Learn more about the flags you pass when adding a node to a secure cluster. -- [Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/create-table-as.md b/src/current/v1.0/create-table-as.md deleted file mode 100644 index c39f9d9662a..00000000000 --- a/src/current/v1.0/create-table-as.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: CREATE TABLE AS -summary: The CREATE TABLE AS statement persists the result of a query into the database for later reuse. -toc: true ---- - -The `CREATE TABLE ... AS` statement creates a new table from the results of a query. - - -## Intended Use - -Tables created with `CREATE TABLE ... AS` are intended to persist the -result of a query for later reuse. - -This can be more efficient than a [view](create-view.html) when the -following two conditions are met: - -- The result of the query is used as-is multiple times. -- The copy needs not be kept up-to-date with the original table over time. - -When the results of a query are reused multiple times within a larger -query, a view is advisable instead. The query optimizer can "peek" -into the view and optimize the surrounding query using the primary key -and indices of the tables mentioned in the view query. - -A view is also advisable when the results must be up-to-date; a view -always retrieves the current data from the tables that the view query -mentions. - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the parent database. 
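-
-For example, if the parent database were `db1` and the user were `maxroach` (hypothetical names, used here only for illustration), the privilege could be granted as follows:
-
-~~~ sql
-> GRANT CREATE ON DATABASE db1 TO maxroach; -- "db1" and "maxroach" are placeholder names
-~~~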
- -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/create_table_as.html %} - -## Parameters - - - -| Parameter | Description | -|-----------|-------------| -| `IF NOT EXISTS` | Create a new table only if a table of the same name does not already exist in the database; if one does exist, do not return an error.

Note that `IF NOT EXISTS` checks the table name only; it does not check if an existing table has the same columns, indexes, constraints, etc., as the new table. | -| `any_name` | The name of the table to create, which must be unique within its database and follow these [identifier rules](keywords-and-identifiers.html#identifiers). When the parent database is not set as the default, the name must be formatted as `database.name`.

The [`UPSERT`](upsert.html) and [`INSERT ON CONFLICT`](insert.html) statements use a temporary table called `excluded` to handle uniqueness conflicts during execution. It's therefore not recommended to use the name `excluded` for any of your tables. | -| `name` | The name of the column you want to use instead of the name of the column from `select_stmt`. | -| `select_stmt` | The query whose results you want to use to create the table. This can use [`SELECT`](select.html), `TABLE` or `VALUES`. | - -## Limitations - -The [primary key](primary-key.html) of tables created with `CREATE -TABLE ... AS` is not derived from the query results. Like for other -tables, it is not possible to add or change the primary key after -creation. Moreover, these tables are not -[interleaved](interleave-in-parent.html) with other tables. The -default rules for [column families](column-families.html) apply. - -For example: - -~~~ sql -> CREATE TABLE logoff ( - user_id INT PRIMARY KEY, - user_email STRING UNIQUE, - logoff_date DATE NOT NULL, -); -> CREATE TABLE logoff_copy AS TABLE logoff; -> SHOW CREATE TABLE logoff_copy; -~~~ -~~~ -+-------------+-----------------------------------------------------------------+ -| Table | CreateTable | -+-------------+-----------------------------------------------------------------+ -| logoff_copy | CREATE TABLE logoff_copy ( | -| | user_id INT NULL, | -| | user_email STRING NULL, | -| | logoff_date DATE NULL, | -| | FAMILY "primary" (user_id, user_email, logoff_date, rowid) | -| | ) | -+-------------+-----------------------------------------------------------------+ -(1 row) -~~~ - -The example illustrates that the primary key, unique and "not null" -constraints are not propagated to the copy. - -It is however possible to -[create a secondary index](create-index.html) after `CREATE TABLE -... AS`. - -For example: - -~~~ sql -> CREATE INDEX logoff_copy_id_idx ON logoff_copy(user_id); -> SHOW CREATE TABLE logoff_copy; -~~~ -~~~ -+-------------+-----------------------------------------------------------------+ -| Table | CreateTable | -+-------------+-----------------------------------------------------------------+ -| logoff_copy | CREATE TABLE logoff_copy ( | -| | user_id INT NULL, | -| | user_email STRING NULL, | -| | logoff_date DATE NULL, | -| | INDEX logoff_copy_id_idx (user_id ASC), | -| | FAMILY "primary" (user_id, user_email, logoff_date, rowid) | -| | ) | -+-------------+-----------------------------------------------------------------+ -(1 row) -~~~ - -For maximum data storage optimization, consider using separately -[`CREATE`](create-table.html) followed by -[`INSERT INTO ...`](insert.html) to populate the table using the query -results. - -## Examples - -### Create a Table from a `SELECT` Query - -~~~ sql -> SELECT * FROM customers WHERE state = 'NY'; -~~~ -~~~ -+----+---------+-------+ -| id | name | state | -+----+---------+-------+ -| 6 | Dorotea | NY | -| 15 | Thales | NY | -+----+---------+-------+ -~~~ -~~~ sql -> CREATE TABLE customers_ny AS SELECT * FROM customers WHERE state = 'NY'; - -> SELECT * FROM customers_ny; -~~~ -~~~ -+----+---------+-------+ -| id | name | state | -+----+---------+-------+ -| 6 | Dorotea | NY | -| 15 | Thales | NY | -+----+---------+-------+ -~~~ - -### Change Column Names - - - -This statement creates a copy of an existing table but with changed column names. 
- - -~~~ sql -> CREATE TABLE customers_ny (id, first_name) AS SELECT id, name FROM customers WHERE state = 'NY'; - -> SELECT * FROM customers_ny; -~~~ -~~~ -+----+------------+ -| id | first_name | -+----+------------+ -| 6 | Dorotea | -| 15 | Thales | -+----+------------+ -~~~ - -### Create a Table from a `VALUES` Clause - -~~~ sql -> CREATE TABLE tech_states AS VALUES ('CA'), ('NY'), ('WA'); - -> SELECT * FROM tech_states; -~~~ -~~~ -+---------+ -| column1 | -+---------+ -| CA | -| NY | -| WA | -+---------+ -(3 rows) -~~~ - - -### Create a Copy of an Existing Table - -~~~ sql -> CREATE TABLE customers_ny_copy AS TABLE customers_ny; - -> SELECT * FROM customers_ny_copy; -~~~ -~~~ -+----+------------+ -| id | first_name | -+----+------------+ -| 6 | Dorotea | -| 15 | Thales | -+----+------------+ -~~~ - -When a table copy is created this way, the copy is not associated to -any primary key, secondary index or constraint that was present on the -original table. - -## See Also - -- [`CREATE TABLE`](create-table.html) -- [`CREATE VIEW`](create-view.html) -- [`SELECT`](select.html) -- [`INSERT`](insert.html) -- [`DROP TABLE`](drop-table.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/create-table.md b/src/current/v1.0/create-table.md deleted file mode 100644 index 36981398a50..00000000000 --- a/src/current/v1.0/create-table.md +++ /dev/null @@ -1,318 +0,0 @@ ---- -title: CREATE TABLE -summary: The CREATE TABLE statement creates a new table in a database. -toc: true ---- - -The `CREATE TABLE` [statement](sql-statements.html) creates a new table in a database. - - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the parent database. - -## Synopsis - -
- -{% include {{ page.version.version }}/sql/diagrams/create_table.html %} - -**column_def ::=** - -{% include {{ page.version.version }}/sql/diagrams/column_def.html %} - -**col_qual_list ::=** - -
- {% include {{ page.version.version }}/sql/diagrams/col_qual_list.html %} -
- -**index_def ::=** - -{% include {{ page.version.version }}/sql/diagrams/index_def.html %} - -**family_def ::=** - -{% include {{ page.version.version }}/sql/diagrams/family_def.html %} - -**table_constraint ::=** - -
- {% include {{ page.version.version }}/sql/diagrams/table_constraint.html %} -
- -**opt_interleave ::=** - -{% include {{ page.version.version }}/sql/diagrams/opt_interleave.html %} - -
- -{{site.data.alerts.callout_success}}To create a table from the results of a SELECT statement, use CREATE TABLE AS. -{{site.data.alerts.end}} - -## Parameters - -Parameter | Description -----------|------------ -`IF NOT EXISTS` | Create a new table only if a table of the same name does not already exist in the database; if one does exist, do not return an error.

Note that `IF NOT EXISTS` checks the table name only; it does not check if an existing table has the same columns, indexes, constraints, etc., as the new table. -`any_name` | The name of the table to create, which must be unique within its database and follow these [identifier rules](keywords-and-identifiers.html#identifiers). When the parent database is not set as the default, the name must be formatted as `database.name`.

The [`UPSERT`](upsert.html) and [`INSERT ON CONFLICT`](insert.html) statements use a temporary table called `excluded` to handle uniqueness conflicts during execution. It's therefore not recommended to use the name `excluded` for any of your tables. -`column_def` | A comma-separated list of column definitions. Each column requires a [name/identifier](keywords-and-identifiers.html#identifiers) and [data type](data-types.html); optionally, a [column-level constraint](constraints.html) can be specified. Column names must be unique within the table but can have the same name as indexes or constraints.

Any Primary Key, Unique, and Check [constraints](constraints.html) defined at the column level are moved to the table-level as part of the table's creation. Use the [`SHOW CREATE TABLE`](show-create-table.html) statement to view them at the table level. -`index_def` | An optional, comma-separated list of [index definitions](indexes.html). For each index, the column(s) to index must be specified; optionally, a name can be specified. Index names must be unique within the table and follow these [identifier rules](keywords-and-identifiers.html#identifiers). See the [Create a Table with Secondary Indexes](#create-a-table-with-secondary-indexes) example below.

The [`CREATE INDEX`](create-index.html) statement can be used to create an index separate from table creation. -`family_def` | An optional, comma-separated list of [column family definitions](column-families.html). Column family names must be unique within the table but can have the same name as columns, constraints, or indexes.

A column family is a group of columns that are stored as a single key-value pair in the underlying key-value store. CockroachDB automatically groups columns into families to ensure efficient storage and performance. However, there are cases when you may want to manually assign columns to families. For more details, see [Column Families](column-families.html). -`table_constraint` | An optional, comma-separated list of [table-level constraints](constraints.html). Constraint names must be unique within the table but can have the same name as columns, column families, or indexes. -`opt_interleave` | You can potentially optimize query performance by [interleaving tables](interleave-in-parent.html), which changes how CockroachDB stores your data. - -## Table-Level Replication - -By default, tables are created in the default replication zone but can be placed into a specific replication zone. See [Create a Replication Zone for a Table](configure-replication-zones.html#create-a-replication-zone-for-a-table) for more information. - -## Examples - -### Create a Table (No Primary Key Defined) - -In CockroachDB, every table requires a [primary key](primary-key.html). If one is not explicitly defined, a column called `rowid` of the type `INT` is added automatically as the primary key, with the `unique_rowid()` function used to ensure that new rows always default to unique `rowid` values. The primary key is automatically indexed. - -{{site.data.alerts.callout_info}}Strictly speaking, a primary key's unique index is not created; it is derived from the key(s) under which the data is stored, so it takes no additional space. However, it appears as a normal unique index when using commands like SHOW INDEX.{{site.data.alerts.end}} - -~~~ sql -> CREATE TABLE logon ( - user_id INT, - logon_date DATE -); - -> SHOW COLUMNS FROM logon; -~~~ - -~~~ -+------------+------+------+---------+---------+ -| Field | Type | Null | Default | Indices | -+------------+------+------+---------+---------+ -| user_id | INT | true | NULL | {} | -| logon_date | DATE | true | NULL | {} | -+------------+------+------+---------+---------+ -(2 rows) -~~~ - -~~~ sql -> SHOW INDEX FROM logon; -~~~ - -~~~ -+-------+---------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+-------+---------+--------+-----+--------+-----------+---------+----------+ -| logon | primary | true | 1 | rowid | ASC | false | false | -+-------+---------+--------+-----+--------+-----------+---------+----------+ -(1 row) -~~~ - -### Create a Table (Primary Key Defined) - -In this example, we create a table with three columns. One column is the [primary key](primary-key.html), another is given the [Unique constraint](unique.html), and the third has no constraints. The primary key and column with the Unique constraint are automatically indexed. 
- -~~~ sql -> CREATE TABLE logoff ( - user_id INT PRIMARY KEY, - user_email STRING UNIQUE, - logoff_date DATE -); - -> SHOW COLUMNS FROM logoff; -~~~ - -~~~ -+-------------+--------+-------+---------+---------------------------------+ -| Field | Type | Null | Default | Indices | -+-------------+--------+-------+---------+---------------------------------+ -| user_id | INT | false | NULL | {primary,logoff_user_email_key} | -| user_email | STRING | true | NULL | {logoff_user_email_key} | -| logoff_date | DATE | true | NULL | {} | -+-------------+--------+-------+---------+---------------------------------+ -(3 rows) -~~~ - -~~~ sql -> SHOW INDEX FROM logoff; -~~~ - -~~~ -+--------+-----------------------+--------+-----+------------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+--------+-----------------------+--------+-----+------------+-----------+---------+----------+ -| logoff | primary | true | 1 | user_id | ASC | false | false | -| logoff | logoff_user_email_key | true | 1 | user_email | ASC | false | false | -| logoff | logoff_user_email_key | true | 2 | user_id | ASC | false | true | -+--------+-----------------------+--------+-----+------------+-----------+---------+----------+ -(3 rows) -~~~ - -### Create a Table with Secondary Indexes - -In this example, we create two secondary indexes during table creation. Secondary indexes allow efficient access to data with keys other than the primary key. This example also demonstrates a number of column-level and table-level [constraints](constraints.html). - -~~~ sql -> CREATE TABLE product_information ( - product_id INT PRIMARY KEY NOT NULL, - product_name STRING(50) UNIQUE NOT NULL, - product_description STRING(2000), - category_id STRING(1) NOT NULL CHECK (category_id IN ('A','B','C')), - weight_class INT, - warranty_period INT CONSTRAINT valid_warranty CHECK (warranty_period BETWEEN 0 AND 24), - supplier_id INT, - product_status STRING(20), - list_price DECIMAL(8,2), - min_price DECIMAL(8,2), - catalog_url STRING(50) UNIQUE, - date_added DATE DEFAULT CURRENT_DATE(), - CONSTRAINT price_check CHECK (list_price >= min_price), - INDEX date_added_idx (date_added), - INDEX supp_id_prod_status_idx (supplier_id, product_status) -); - -> SHOW INDEX FROM product_information; -~~~ - -~~~ -+---------------------+--------------------------------------+--------+-----+----------------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+---------------------+--------------------------------------+--------+-----+----------------+-----------+---------+----------+ -| product_information | primary | true | 1 | product_id | ASC | false | false | -| product_information | product_information_product_name_key | true | 1 | product_name | ASC | false | false | -| product_information | product_information_product_name_key | true | 2 | product_id | ASC | false | true | -| product_information | product_information_catalog_url_key | true | 1 | catalog_url | ASC | false | false | -| product_information | product_information_catalog_url_key | true | 2 | product_id | ASC | false | true | -| product_information | date_added_idx | false | 1 | date_added | ASC | false | false | -| product_information | date_added_idx | false | 2 | product_id | ASC | false | true | -| product_information | supp_id_prod_status_idx | false | 1 | supplier_id | ASC | false | false | -| product_information | supp_id_prod_status_idx | false | 2 | product_status | ASC | false 
| false | -| product_information | supp_id_prod_status_idx | false | 3 | product_id | ASC | false | true | -+---------------------+--------------------------------------+--------+-----+----------------+-----------+---------+----------+ -(10 rows) -~~~ - -We also have other resources on indexes: - -- Create indexes for existing tables using [`CREATE INDEX`](create-index.html). -- [Learn more about indexes](indexes.html). - -### Create a Table with Auto-Generated Unique Row IDs - -{% include {{ page.version.version }}/faq/auto-generate-unique-ids.html %} - -### Create a Table with Foreign Keys - -[Foreign keys](foreign-key.html) guarantee a column uses only values that already exist in the column it references, which must be from another table. This constraint enforces referential integrity between the two tables. - -There are a [number of rules](foreign-key.html#rules-for-creating-foreign-keys) that govern foreign keys, but the two most important are: - -- Foreign key columns must be [indexed](indexes.html) when creating the table using `INDEX`, `PRIMARY KEY`, or `UNIQUE`. - -- Referenced columns must contain only unique values. This means the `REFERENCES` clause must use exactly the same columns as a [Primary Key](primary-key.html) or [Unique](unique.html) constraint. - -In this example, we'll show a series of tables using different formats of foreign keys. - -~~~ sql -> CREATE TABLE customers (id INT PRIMARY KEY, email STRING UNIQUE); - -> CREATE TABLE products (sku STRING PRIMARY KEY, price DECIMAL(9,2)); - -> CREATE TABLE orders ( - id INT PRIMARY KEY, - product STRING NOT NULL REFERENCES products, - quantity INT, - customer INT NOT NULL CONSTRAINT valid_customer REFERENCES customers (id), - CONSTRAINT id_customer_unique UNIQUE (id, customer), - INDEX (product), - INDEX (customer) -); - -> CREATE TABLE reviews ( - id INT PRIMARY KEY, - product STRING NOT NULL REFERENCES products, - customer INT NOT NULL, - "order" INT NOT NULL, - body STRING, - CONSTRAINT order_customer_fk FOREIGN KEY ("order", customer) REFERENCES orders (id, customer), - INDEX (product), - INDEX (customer), - INDEX ("order", customer) -); -~~~ - -### Create a Table that Mirrors Key-Value Storage - -{% include {{ page.version.version }}/faq/simulate-key-value-store.html %} - -### Create a Table from a `SELECT` Statement - -You can use the [`CREATE TABLE AS`](create-table-as.html) statement to create a new table from the results of a `SELECT` statement, for example: - -~~~ sql -> SELECT * FROM customers WHERE state = 'NY'; -~~~ -~~~ -+----+---------+-------+ -| id | name | state | -+----+---------+-------+ -| 6 | Dorotea | NY | -| 15 | Thales | NY | -+----+---------+-------+ -~~~ -~~~ sql -> CREATE TABLE customers_ny AS SELECT * FROM customers WHERE state = 'NY'; - -> SELECT * FROM customers_ny; -~~~ -~~~ -+----+---------+-------+ -| id | name | state | -+----+---------+-------+ -| 6 | Dorotea | NY | -| 15 | Thales | NY | -+----+---------+-------+ -~~~ - -### Show the Definition of a Table - -To show the definition of a table, use the [`SHOW CREATE TABLE`](show-create-table.html) statement. The contents of the `CreateTable` column in the response is a string with embedded line breaks that, when echoed, produces formatted output. 
- -~~~ sql -> SHOW CREATE TABLE logoff; -~~~ - -~~~ -+--------+----------------------------------------------------------+ -| Table | CreateTable | -+--------+----------------------------------------------------------+ -| logoff | CREATE TABLE logoff ( | -| | user_id INT NOT NULL, | -| | user_email STRING(50) NULL, | -| | logoff_date DATE NULL, | -| | CONSTRAINT "primary" PRIMARY KEY (user_id), | -| | UNIQUE INDEX logoff_user_email_key (user_email), | -| | FAMILY "primary" (user_id, user_email, logoff_date) | -| | ) | -+--------+----------------------------------------------------------+ -(1 row) -~~~ - -## See Also - -- [`INSERT`](insert.html) -- [`ALTER TABLE`](alter-table.html) -- [`DELETE`](delete.html) -- [`DROP TABLE`](drop-table.html) -- [`RENAME TABLE`](rename-table.html) -- [`SHOW TABLES`](show-tables.html) -- [`SHOW COLUMNS`](show-columns.html) -- [Column Families](column-families.html) -- [Table-Level Replication Zones](configure-replication-zones.html#create-a-replication-zone-for-a-table) diff --git a/src/current/v1.0/create-user.md b/src/current/v1.0/create-user.md deleted file mode 100644 index 812e50513f2..00000000000 --- a/src/current/v1.0/create-user.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: CREATE USER -summary: The CREATE USER statement creates SQL users, which let you control privileges on your databases and tables. -toc: true ---- - -The `CREATE USER` [statement](sql-statements.html) creates SQL users, which let you control [privileges](privileges.html) on your databases and tables. - -When creating users, it's important to note: - -- Usernames are case-insensitive; must start with either a letter or underscore; must contain only letters, numbers, or underscores; and must be between 1 and 63 characters. -- After creating users, you must [grant them privileges to databases and tables](grant.html). -- On secure clusters, users must [authenticate their access to the cluster](#user-authentication). - -{{site.data.alerts.callout_info}}You can also create and manage users through the cockroach user command. Notably, this is the most efficient way to remove users.{{site.data.alerts.end}} - - -## Required Privileges - -The user must have the `INSERT` and `UPDATE` [privileges](privileges.html) on the `system.users` table. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/create_user.html %} - -## Parameters - - - -| Parameter | Description | -|-----------|-------------| -|`name` | The name of the user you want to create.

Usernames are case-insensitive; must start with either a letter or underscore; must contain only letters, numbers, or underscores; and must be between 1 and 63 characters.| -|`password` | Let the user [authenticate their access to a secure cluster](#user-authentication) using this password. Passwords must be entered as [string](string.html) values surrounded by single quotes (`'`).| - -## User Authentication - -Secure clusters require users to authenticate their access to databases and tables. CockroachDB offers two methods for this: - -- [Client certificate and key authentication](#secure-clusters-with-client-certificates), which is available to all users. To ensure the highest level of security, we recommend only using client certificate and key authentication. -- [Password authentication](#secure-clusters-with-passwords), which is available only to users who you've created passwords for. To create a user with a password, use the `WITH PASSWORD` clause of `CREATE USER`.

You can use this password to authenticate users without supplying their client certificate and key; however, we recommend instead using client certificate and key authentication whenever possible.

To add a password to an existing user, use the [`cockroach user` command](create-and-manage-users.html#update-a-users-password). - -{{site.data.alerts.callout_info}}Insecure clusters do not support user authentication, but you can still create passwords for users (besides root) through the WITH PASSWORD clause.{{site.data.alerts.end}} - -## Examples - -### Create a User - -~~~ sql -> CREATE USER jpointsman; -~~~ - -Usernames are case-insensitive; must start with either a letter or underscore; must contain only letters, numbers, or underscores; and must be between 1 and 63 characters. - -After creating users, you must [grant them privileges to databases and tables](grant.html). - -For users on secure clusters, you also need to generate [client certificates and keys](create-security-certificates.html#create-the-certificate-and-key-pair-for-a-client) to authenticate the user's access to the cluster. - -### Create a User with Password Authentication - -~~~ sql -> CREATE USER jpointsman WITH PASSWORD 'Q7gc8rEdS'; -~~~ - -{{site.data.alerts.callout_info}}We strongly recommend also creating client certificates and keys and using them to authenticate the user's access to the cluster.{{site.data.alerts.end}} - -### Manage Users - -After creating users, you can manage them using the [`cockroach user`](create-and-manage-users.html) command. - -### Authenticate as a Specific User - -#### Insecure Clusters - -~~~ shell -$ cockroach sql --insecure --user=jpointsman -~~~ - -#### Secure Clusters with Client Certificates - -All users can authenticate their access to a secure cluster using [a client certificate](create-security-certificates.html#create-the-certificate-and-key-pair-for-a-client) issued to their username. - -~~~ shell -$ cockroach sql --user=jpointsman -~~~ - -#### Secure Clusters with Passwords - -[Users with passwords](#create-a-user-with-password-authentication) can authenticate their access by entering their password at the command prompt instead of using their client certificate and key. - -~~~ shell -$ cockroach sql --user=jpointsman -~~~ - -## See Also - -- [`cockroach user` command](create-and-manage-users.html) -- [`SHOW USERS`](show-users.html) -- [`GRANT`](grant.html) -- [`SHOW GRANTS`](show-grants.html) -- [Create Security Certificates](create-security-certificates.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/create-view.md b/src/current/v1.0/create-view.md deleted file mode 100644 index 43cbe6584c2..00000000000 --- a/src/current/v1.0/create-view.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: CREATE VIEW -summary: The CREATE VIEW statement creates a . -toc: true ---- - -The `CREATE VIEW` statement creates a new [view](views.html), which is a stored `SELECT` query represented as a virtual table. - - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the parent database and the `SELECT` privilege on any table(s) referenced by the view. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/create_view.html %} - -## Parameters - -Parameter | Description -----------|------------ -`view_name` | The name of the view to create, which must be unique within its database and follow these [identifier rules](keywords-and-identifiers.html#identifiers). When the parent database is not set as the default, the name must be formatted as `database.name`. -`column_list` | An optional, comma-separated list of column names for the view. 
If specified, these names will be used in the response instead of the columns specified in `AS select_stmt`. -`AS select_stmt` | The [`SELECT`](select.html) statement to execute when the view is requested.

Note that it is not currently possible to use `*` to select all columns from a referenced table or view; instead, you must specify specific columns. - -## Example - -{{site.data.alerts.callout_success}}This example highlights one key benefit to using views: simplifying complex queries. For additional benefits and examples, see Views.{{site.data.alerts.end}} - -Let's say you're using our [sample `startrek` database](generate-cockroachdb-resources.html#generate-example-data), which contains two tables, `episodes` and `quotes`. There's a foreign key constraint between the `episodes.id` column and the `quotes.episode` column. To count the number of famous quotes per season, you could run the following `JOIN`: - -~~~ sql -> SELECT startrek.episodes.season, count(*) - FROM startrek.quotes - JOIN startrek.episodes - ON startrek.quotes.episode = startrek.episodes.id - GROUP BY startrek.episodes.season; -~~~ - -~~~ -+--------+----------+ -| season | count(*) | -+--------+----------+ -| 2 | 76 | -| 3 | 46 | -| 1 | 78 | -+--------+----------+ -(3 rows) -~~~ - -Alternatively, to make it much easier to run this complex query, you could create a view: - -~~~ sql -> CREATE VIEW startrek.quotes_per_season (season, quotes) - AS SELECT startrek.episodes.season, count(*) - FROM startrek.quotes - JOIN startrek.episodes - ON startrek.quotes.episode = startrek.episodes.id - GROUP BY startrek.episodes.season; -~~~ - -~~~ -CREATE VIEW -~~~ - -The view is then represented as a virtual table alongside other tables in the database: - -~~~ sql -> SHOW TABLES FROM startrek; -~~~ - -~~~ -+-------------------+ -| Table | -+-------------------+ -| episodes | -| quotes | -| quotes_per_season | -+-------------------+ -(4 rows) -~~~ - -Executing the query is as easy as `SELECT`ing from the view, as you would from a standard table: - -~~~ sql -> SELECT * FROM startrek.quotes_per_season; -~~~ - -~~~ -+--------+--------+ -| season | quotes | -+--------+--------+ -| 2 | 76 | -| 3 | 46 | -| 1 | 78 | -+--------+--------+ -(3 rows) -~~~ - -## See Also - -- [Views](views.html) -- [`SHOW CREATE VIEW`](show-create-view.html) -- [`ALTER VIEW`](alter-view.html) -- [`DROP VIEW`](drop-view.html) diff --git a/src/current/v1.0/data-types.md b/src/current/v1.0/data-types.md deleted file mode 100644 index 88ae37dc241..00000000000 --- a/src/current/v1.0/data-types.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Data Types -summary: Learn about the data types supported by CockroachDB. -toc: true ---- - -## Supported Types - -CockroachDB supports the following data types. Click a type for more details. - -Type | Description | Example ------|-------------|-------- -[`INT`](int.html) | A 64-bit signed integer. | `12345` -[`SERIAL`](serial.html) | A unique 64-bit signed integer. | `148591304110702593 ` -[`DECIMAL`](decimal.html) | An exact, fixed-point number. | `1.2345` -[`FLOAT`](float.html) | A 64-bit, inexact, floating-point number. | `1.2345` -[`BOOL`](bool.html) | A Boolean value. | `true` -[`DATE`](date.html) | A date. | `DATE '2016-01-25'` -[`TIMESTAMP`
`TIMESTAMPTZ`](timestamp.html) | A date and time pairing in UTC. | `TIMESTAMP '2016-01-25 10:10:10'`
`TIMESTAMPTZ '2016-01-25 10:10:10-05:00'` -[`INTERVAL`](interval.html) | A span of time. | `INTERVAL '2h30m30s'` -[`STRING`](string.html) | A string of Unicode characters. | `'a1b2c3'` -[`COLLATE`](collate.html) | The `COLLATE` feature lets you sort [`STRING`](string.html) values according to language- and country-specific rules, known as collations. | `'a1b2c3' COLLATE en` -[`BYTES`](bytes.html) | A string of binary characters. | `b'\141\061\142\062\143\063'` - -## Data Type Conversions & Casts - -CockroachDB supports explicit type conversions using the following methods: - -- ` 'string literal'`, to convert from the literal representation of a value to a value of that type. For example: - `DATE '2008-12-21'`, `INT '123'`, or `BOOL 'true'`. - -- `::`, or its equivalent longer form `CAST( AS )`, which converts an arbitrary expression of one built-in type to another (this is also known as type coercion or "casting"). For example: - `NOW()::DECIMAL`, `VARIANCE(a+2)::INT`. - - {{site.data.alerts.callout_success}} - To create constant values, consider using a - type annotation - instead of a cast, as it provides more predictable results. - {{site.data.alerts.end}} - -- Other [built-in conversion functions](functions-and-operators.html) when the type is not a SQL type, for example `from_ip()`, `to_ip()` to convert IP addresses between `STRING` and `BYTES` values. - - -You can find each data type's supported converstion and casting on its -respective page in its section **Supported Casting & Conversion**. diff --git a/src/current/v1.0/date.md b/src/current/v1.0/date.md deleted file mode 100644 index dcae39bf015..00000000000 --- a/src/current/v1.0/date.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: DATE -summary: CockroachDB's DATE data type stores a year, month, and day. -toc: true ---- - -The `DATE` [data type](data-types.html) stores a year, month, and day. - - -## Syntax - -A constant value of type `DATE` can be expressed using an -[interpreted literal](sql-constants.html#interpreted-literals), or a -string literal -[annotated with](sql-expressions.html#explicitly-typed-expressions) -type `DATE` or -[coerced to](sql-expressions.html#explicit-type-coercions) type -`DATE`. - -The string format for dates is `YYYY-MM-DD`. For example: `DATE '2016-12-23'`. - -CockroachDB also supports using uninterpreted -[string literals](sql-constants.html#string-literals) in contexts -where a `DATE` value is otherwise expected. - -## Size - -A `DATE` column supports values up to 8 bytes in width, but the total storage size is likely to be larger due to CockroachDB metadata. 
- -## Examples - -~~~ sql -> CREATE TABLE dates (a DATE PRIMARY KEY, b INT); - -> SHOW COLUMNS FROM dates; -~~~ -~~~ -+-------+------+-------+---------+ -| Field | Type | Null | Default | -+-------+------+-------+---------+ -| a | DATE | false | NULL | -| b | INT | true | NULL | -+-------+------+-------+---------+ -~~~ -~~~ sql -> -- explicitly typed DATE literal -> INSERT INTO dates VALUES (DATE '2016-03-26', 12345); - -> -- string literal implicitly typed as DATE -> INSERT INTO dates VALUES ('2016-03-27', 12345); - -> SELECT * FROM dates; -~~~ -~~~ -+---------------------------+-------+ -| a | b | -+---------------------------+-------+ -| 2016-03-26 00:00:00+00:00 | 12345 | -| 2016-03-27 00:00:00+00:00 | 12345 | -+---------------------------+-------+ -~~~ - -## Supported Casting & Conversion - -`DATE` values can be [cast](data-types.html#data-type-conversions-casts) to any of the following data types: - -Type | Details ------|-------- -`INT` | Converts to number of days since the Unix epoch (Jan. 1, 1970). This is a CockroachDB experimental feature which may be changed without notice. -`DECIMAL` | Converts to number of days since the Unix epoch (Jan. 1, 1970). This is a CockroachDB experimental feature which may be changed without notice. -`FLOAT` | Converts to number of days since the Unix epoch (Jan. 1, 1970). This is a CockroachDB experimental feature which may be changed without notice. -`TIMESTAMP` | Sets the time to 00:00 (midnight) in the resulting timestamp -`STRING` | –– - -{{site.data.alerts.callout_info}}Because the SERIAL data type represents values automatically generated by CockroachDB to uniquely identify rows, you cannot meaningfully cast other data types as SERIAL values.{{site.data.alerts.end}} - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/debug-and-error-logs.md b/src/current/v1.0/debug-and-error-logs.md deleted file mode 100644 index ea88ea5caca..00000000000 --- a/src/current/v1.0/debug-and-error-logs.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Debug & Error Logs -summary: CockroachDB logs include details about certain node-level and range-level events, such as errors. -toc: true ---- - -If you need to [troubleshoot](troubleshooting-overview.html) issues with your cluster, you can check a node's logs, which include details about certain node-level and range-level events, such as errors. For example, if CockroachDB crashes, it normally logs a stack trace to what caused the problem. - - -## Details - -When a node processes a [`cockroach` command](cockroach-commands.html), it produces a stream of messages about the command's activities. Each message's body describes the activity, and its envelope contains metadata such as the message's severity level. - -As a command generates messages, CockroachDB uses the [command](#commands)'s [logging flags](#flags) and the message's [severity level](#severity-levels) to determine the appropriate [location](#output-locations) for it. - -Each node's logs detail only the internal activity of that node without visibility into the behavior of other nodes in the cluster. When troubleshooting, this means that you must identify the node where the problem occurred or [collect the logs from all active nodes in your cluster](debug-zip.html). - -{{site.data.alerts.callout_info}}You can also log queries your cluster receives.{{site.data.alerts.end}} - -### Commands - -All [`cockroach` commands](cockroach-commands.html) support logging. 
However, it's important to note: - -- `cockroach start` generates most messages related to the operation of your cluster. -- Other commands do generate messages, but they're typically only interesting in troubleshooting scenarios. - -### Severity Levels - -CockroachDB identifies each message with a severity level, letting operators know if they need to intercede: - -1. `INFO` *(lowest severity; no action necessary)* -2. `WARNING` -3. `ERROR` -4. `FATAL` *(highest severity; requires operator attention)* - -**Default Behavior by Severity Level** - -Command | `INFO` messages | `WARNING` and above messages ---------|--------|-------------------- -[`cockroach start`](start-a-node.html) | Write to file | Write to file -[All other commands](cockroach-commands.html) | Discard | Print to `stderr` - -### Output Locations - -Based on the command's flags and the message's [severity level](#severity-levels), CockroachDB does one of the following: - -- [Writes the message to a file](#write-to-file) -- [Prints it to `stderr`](#print-to-stderr) -- [Discards the message entirely](#discard-message) - -#### Write to File - -CockroachDB can write messages to log files, which use the following format: - -~~~ -cockroach.[host].[user].[start timestamp in system time].[process ID].log -~~~ - -Property | `cockroach start` | All other commands ----------|-------------------|------------------- -Enabled by | Default1 | Explicit `--log-dir` flag -Default File Destination | `[first `[`store`](start-a-node.html#store)` dir]/logs` | *N/A* -Change File Destination | `--log-dir=[destination]` | `--log-dir=[destination]` -Default Severity Level Threshold | `INFO` | *N/A* -Change Severity Threshold | `--log-file-verbosity=[severity level]` | `--log-file-verbosity=[severity level]` -Disabled by | `--log-dir=`1 | Default - -{{site.data.alerts.callout_info}}1 If the cockroach process does not have access to on-disk storage, cockroach start does not write messages to log files; instead it prints all messages to stderr.{{site.data.alerts.end}} - -#### Print to `stderr` - -CockroachDB can print messages to `stderr`, which normally prints them to the machine's terminal but does not store them. - -Property | `cockroach start` | All other commands ----------|-------------------|------------------- -Enabled by | Explicit `--logtostderr` flag2 | Default -Default Severity Level Threshold | *N/A* | `WARNING` -Change Severity Threshold | `--logtostderr=[severity level]` | `--logtostderr=[severity level]` -Disabled by | Default2 | `--logtostderr=NONE` - -{{site.data.alerts.callout_info}}2 cockroach start does not print any messages to stderr unless the cockroach process does not have access to on-disk storage, in which case it defaults to --logtostderr=INFO and prints all messages to stderr.{{site.data.alerts.end}} - -#### Discard Message - -Messages with severity levels below the `--logtostderr` and `--log-file-verbosity` flag's values are neither written to files nor printed to `stderr`, so they are discarded. - -By default, commands besides `cockroach start` discard messages with the `INFO` [severity level](#severity-levels). - -## Flags - -{% include {{ page.version.version }}/misc/logging-flags.md %} - -The `--log-backtrace-at`, `--verbosity`, and `--v` flags are intended for internal debugging by CockroachDB contributors. 
- -## Log Queries - -To help troubleshoot [query performance issues](query-behavior-troubleshooting.html#performance-issues), you can use [cluster-wide settings](cluster-settings.html) to enable logging for long-running SQL transactions or all queries, regardless of time. - -{{site.data.alerts.callout_danger}}These settings makes all queries slower and causes nodes to consume more memory. You should disable query logging as soon as you're done troubleshooting the query's issues.{{site.data.alerts.end}} - -### Enable Query Logging - -- **Long-running transactions**: - - ~~~ sql - > SET CLUSTER SETTING sql.trace.txn.enable_threshold = '[time]'; - ~~~ - - The `[time]` parameter accepts common time specifiers, such as `100ms` or `2s`. - -- **All queries**: - - ~~~ sql - > SET CLUSTER SETTING sql.trace.log_statement_execute = true; - ~~~ - -### Details - -After you enable query logging, whenever nodes process SQL statements, they generate messages with an `INFO` [severity level](#severity-levels). - -By default, these messages will get [written to files](#write-to-file), but are ultimate handled by whatever logging behavior you set for [`cockroach start`](start-a-node.html#logging). - -### Improve Query Performance - -After finding which queries are slow, use [`EXPLAIN`](explain.html) to examine them. It's possible that the query is slow because it performs a full-table scan. In these cases, you can likely improve the query's performance by [adding an index](create-index.html). - -*(More guidance around query performance optimization forthcoming.)* - -### Disable Query Logging - -Once you're done troubleshooting, you should disable query logging to prevent it from unnecessarily consuming resources. - -- **Long-running transactions**: - - ~~~ sql - > SET CLUSTER SETTING sql.trace.txn.enable_threshold = '0s'; - ~~~ - -- **All queries**: - - ~~~ sql - > SET CLUSTER SETTING sql.trace.log_statement_execute = false; - ~~~ - -## See Also - -- [Troubleshooting Overview](troubleshooting-overview.html) -- [Support Resources](support-resources.html) diff --git a/src/current/v1.0/debug-zip.md b/src/current/v1.0/debug-zip.md deleted file mode 100644 index b5cd49fc803..00000000000 --- a/src/current/v1.0/debug-zip.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Collect Debug Information from Your Cluster -summary: Learn the commands for collecting debug information from all nodes in your cluster. -toc: true ---- - -The `debug zip` [command](cockroach-commands.html) connects to your cluster and gathers the following information from each active node into a single file (inactive nodes are not included): - -- [Log files](debug-and-error-logs.html) -- Schema change events -- Node liveness -- Gossip data -- Stack traces -- Range lists -- A list of databases and tables - -{{site.data.alerts.callout_danger}}The file produced by cockroach debug zip can contain highly sensitive, unanonymized information, such as usernames, passwords, and possibly your table's data. You should share this data only with Cockroach Labs developers and only after determining the most secure method of delivery.{{site.data.alerts.end}} - - -## Details - -### Use Cases - -There are two scenarios in which `debug zip` is useful: - -- To collect all of your nodes' logs, which you can then parse to locate issues. It's important to note, though, that `debug zip` can only access logs from active nodes. See more information [on this page](#collecting-log-files). 
- -- If you experience severe or difficult-to-reproduce issues with your cluster, Cockroach Labs might ask you to send us your cluster's debugging information using `cockroach debug zip`. - -{{site.data.alerts.callout_danger}}The file produced by cockroach debug zip can contain highly sensitive, unanonymized information, such as usernames, passwords, and your table's data. You should share this data only with Cockroach Labs developers and only after determining the most secure method of delivery.{{site.data.alerts.end}} - -### Collecting Log Files - -When you issue the `debug zip` command, the node that receives the request connects to each other node in the cluster. Once it's connected, the node requests the content of all log files stored on the node, the location of which is determined by the `--log-dir` value when you [started the node](start-a-node.html). - -Because `debug zip` relies on CockroachDB's distributed architecture, this means that nodes not currently connected to the cluster cannot respond to the request, so their log files *are not* included. - -After receiving the log files from all of the active nodes, the requesting node aggregates the files and writes them to an archive file you specify. - -You can locate logs in the unarchived file's `debug/nodes/[node dir]/logs` directories. - -## Subcommands - -While the `cockroach debug` command has a few subcommands, the only subcommand users are expected to use is `zip` which collects all of your cluster's debug information in a single file. - -`debug`'s other subcommands are useful only to CockroachDB's developers and contributors. - -## Synopsis - -~~~ shell -# Generate a debug zip: -$ cockroach debug zip [ZIP file destination] [flags] -~~~ - -It's important to understand that the `[flags]` here are used to connect to CockroachDB nodes. This means the values you use in those flags must connect to an active node. If no nodes are live, you must [start at least one node](start-a-node.html). - -## Flags - -The `debug zip` subcommand supports the following [general-use](#general) and [logging](#logging) flags. - -### General - -Flag | Description ------|----------- -`--certs-dir` | The path to the [certificate directory](create-security-certificates.html). The directory must contain valid certificates if running in secure mode.

**Env Variable:** `COCKROACH_CERTS_DIR`
**Default:** `${HOME}/.cockroach-certs/` -`--host` | The server host to connect to. This can be the address of any node in the cluster.

**Env Variable:** `COCKROACH_HOST`
**Default:** `localhost` -`--insecure` | Run in insecure mode. If this flag is not set, the `--certs-dir` flag must point to valid certificates.

**Env Variable:** `COCKROACH_INSECURE`
**Default:** `false` -`--port`
`-p` | The server port to connect to.

**Env Variable:** `COCKROACH_PORT`
**Default:** `26257` - -### Logging - -By default, the `debug zip` command logs errors it experiences to `stderr`. Note that these are errors executing `debug zip`; these are not errors that the logs collected by `debug zip` contain. - -If you need to troubleshoot this command's behavior, you can also change its [logging behavior](debug-and-error-logs.html). - -## Examples - -### Generate a debug zip file - -~~~ shell -# Generate the debug zip file for an insecure cluster: -$ cockroach debug zip ./cockroach-data/logs/debug.zip --insecure - -# Generate the debug zip file for a secure cluster: -$ cockroach debug zip ./cockroach-data/logs/debug.zip - -# Generate the debug zip file from a remote machine: -$ cockroach debug zip ./crdb-debug.zip --host=200.100.50.25 -~~~ - -{{site.data.alerts.callout_info}}Secure examples assume you have the appropriate certificates in the default certificate directory, ${HOME}/.cockroach-certs/.{{site.data.alerts.end}} - -## See Also - -- [File an Issue](file-an-issue.html) -- [Other Cockroach Commands](cockroach-commands.html) -- [Troubleshooting Overview](troubleshooting-overview.html) diff --git a/src/current/v1.0/decimal.md b/src/current/v1.0/decimal.md deleted file mode 100644 index 09f17484dcc..00000000000 --- a/src/current/v1.0/decimal.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: DECIMAL -summary: The DECIMAL data type stores exact, fixed-point numbers. -toc: true ---- - -The `DECIMAL` [data type](data-types.html) stores exact, fixed-point numbers. This type is used when it is important to preserve exact precision, for example, with monetary data. - - -## Aliases - -In CockroachDB, the following are aliases for `DECIMAL`: - -- `DEC` -- `NUMERIC` - -## Precision and Scale - -To limit a decimal column, use `DECIMAL(precision, scale)`, where `precision` is the **maximum** count of digits both to the left and right of the decimal point and `scale` is the **exact** count of digits to the right of the decimal point. The `precision` must not be smaller than the `scale`. Also note that using `DECIMAL(precision)` is equivalent to `DECIMAL(precision, 0)`. - -When inserting a decimal value: - -- If digits to the right of the decimal point exceed the column's `scale`, CockroachDB rounds to the scale. -- If digits to the right of the decimal point are fewer than the column's `scale`, CockroachDB pads to the scale with `0`s. -- If digits to the left and right of the decimal point exceed the column's `precision`, CockroachDB gives an error. -- If the column's `precision` and `scale` are identical, the inserted value must round to less than 1. - -## Syntax - -A constant value of type `DECIMAL` can be entered as a [numeric literal](sql-constants.html#numeric-literals). -For example: `1.414` or `-1234`. - -## Size - -The size of a `DECIMAL` value is variable, starting at 9 bytes. It's recommended to keep values under 64 kilobytes to ensure performance. Above that threshold, [write amplification](https://en.wikipedia.org/wiki/Write_amplification) and other considerations may cause significant performance degradation. 
- -## Examples - -~~~ sql -> CREATE TABLE decimals (a DECIMAL PRIMARY KEY, b DECIMAL(10,5), c NUMERIC); - -> SHOW COLUMNS FROM decimals; -~~~ -~~~ -+-------+---------------+-------+---------+ -| Field | Type | Null | Default | -+-------+---------------+-------+---------+ -| a | DECIMAL | false | NULL | -| b | DECIMAL(10,5) | true | NULL | -| c | DECIMAL | true | NULL | -+-------+---------------+-------+---------+ -~~~ -~~~ sql -> INSERT INTO decimals VALUES (1.01234567890123456789, 1.01234567890123456789, 1.01234567890123456789); - -> SELECT * FROM decimals; -~~~ -~~~ -+------------------------+---------+-----------------------+ -| a | b | c | -+------------------------+---------+-----------------------+ -| 1.01234567890123456789 | 1.01235 | 1.0123456789012346789 | -+------------------------+---------+-----------------------+ -# The value in "a" matches what was inserted exactly. -# The value in "b" has been rounded to the column's scale. -# The value in "c" is handled like "a" because NUMERIC is an alias. -~~~ - -## Supported Casting & Conversion - -`DECIMAL` values can be [cast](data-types.html#data-type-conversions-casts) to any of the following data types: - -Type | Details ------|-------- -`INT` | Truncates decimal precision -`FLOAT` | Loses precision and may round up to +/- infinity if the value is too large in magnitude, or to +/-0 if the value is too small in magnitude -`BOOL` | **0** converts to `false`; all other values convert to `true` -`STRING` | –– - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/default-value.md b/src/current/v1.0/default-value.md deleted file mode 100644 index 8d21bfa1f24..00000000000 --- a/src/current/v1.0/default-value.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Default Value Constraint -summary: The Default Value constraint specifies a value to populate a column with if none is provided. -toc: true ---- - -The Default Value [constraint](constraints.html) specifies a value to write into the constrained column if one is not defined in an `INSERT` statement. The value may be either a hard-coded literal or an expression that is evaluated at the time the row is created. - - -## Details - -- The [data type](data-types.html) of the Default Value must be the same as the data type of the column. -- The Default Value constraint only applies if the column does not have a value specified in the [`INSERT`](insert.html) statement. You can still insert a *NULL* into an optional (nullable) column by explicitly inserting *NULL*. For example, `INSERT INTO foo VALUES (1, NULL);`. - -## Syntax - -You can only apply the Default Value constraint to individual columns. - -{{site.data.alerts.callout_info}}You can also add the Default Value constraint to an existing table through ALTER COLUMN. {{site.data.alerts.end}} - -{% include {{ page.version.version }}/sql/diagrams/default_value_column_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_name` | The name of the constrained column. | -| `column_type` | The constrained column's [data type](data-types.html). | -| `default_value` | The value you want to insert by default, which must evaluate to the same [data type](data-types.html) as the `column_type`.| -| `column_constraints` | Any other column-level [constraints](constraints.html) you want to apply to this column. | -| `column_def` | Definitions for any other columns in the table. 
| -| `table_constraints` | Any table-level [constraints](constraints.html) you want to apply. | - -## Usage Example - -~~~ sql -> CREATE TABLE inventories ( - product_id INT, - warehouse_id INT, - quantity_on_hand INT DEFAULT 100, - PRIMARY KEY (product_id, warehouse_id) - ); - -> INSERT INTO inventories (product_id, warehouse_id) VALUES (1,20); - -> INSERT INTO inventories (product_id, warehouse_id, quantity_on_hand) VALUES (2,30, NULL); - -> SELECT * FROM inventories; -~~~ -~~~ -+------------+--------------+------------------+ -| product_id | warehouse_id | quantity_on_hand | -+------------+--------------+------------------+ -| 1 | 20 | 100 | -| 2 | 30 | NULL | -+------------+--------------+------------------+ -~~~ - -If the Default Value constraint is not specified and an explicit value is not given, a value of *NULL* is assigned to the column. - -## See Also - -- [Constraints](constraints.html) -- [`ALTER COLUMN`](alter-column.html) -- [Check constraint](check.html) -- [Foreign Key constraint](foreign-key.html) -- [Not Null constraint](not-null.html) -- [Primary Key constraint](primary-key.html) -- [Unique constraint](unique.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) diff --git a/src/current/v1.0/delete.md b/src/current/v1.0/delete.md deleted file mode 100644 index 7aa6336617a..00000000000 --- a/src/current/v1.0/delete.md +++ /dev/null @@ -1,185 +0,0 @@ ---- -title: DELETE -summary: The DELETE statement deletes one or more rows from a table. -toc: true ---- - -The `DELETE` [statement](sql-statements.html) deletes rows from a table. - -{{site.data.alerts.callout_info}}To delete columns, see DROP COLUMN.{{site.data.alerts.end}} - - -## Required Privileges - -The user must have the `DELETE` and `SELECT` [privileges](privileges.html) on the table. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/delete.html %} - -## Parameters - - - -| Parameter | Description | -|-----------|-------------| -|`relation_expr` | The name of the table you want to delete rows from.

Deleting from multiple tables in a single statement is not supported.| -|`AS name` | Create an alias for the table name, completely hiding its original name. All subsequent references to the table must use its alias.<br><br>Aliases are primarily used with `JOIN`, which is not yet supported but is coming in a [future release](https://github.com/cockroachdb/cockroach/issues/2970).| -|`WHERE a_expr`| `a_expr` must be an expression that returns Boolean values using columns (e.g., `<column> = <value>`). Delete rows that return `TRUE`.<br><br>__Without a `WHERE` clause in your statement, `DELETE` removes all rows from the table.__| -|`RETURNING ...`| Retrieve a table of deleted rows using [all columns](#use-all-columns) (`*`) or [specific columns](#use-specific-columns) (named in `a_expr`).<br><br>To return nothing in the response, not even the number of rows affected, use `RETURNING NOTHING`.| -|`AS col_label`| In the retrieved table, change the column label from `a_expr` to `col_label`.<br><br>You can also change column labels with an `identifier`, but must follow [these rules](keywords-and-identifiers.html#identifiers).| - -## Success Responses - -Successful `DELETE` statements return one of the following: - -| Response | Description | -|-----------|-------------| -|`DELETE` _`int`_ | _int_ rows were deleted.<br><br>`DELETE` statements that do not delete any rows respond with `DELETE 0`. When `RETURNING NOTHING` is used, this information is not included in the response. | -|Retrieved table | Including the `RETURNING` clause retrieves the deleted rows, using the columns identified by the clause's parameters.<br><br>
[See an example.](#return-deleted-rows)| - -## Disk Space Usage After Deletes - -Deleting a row does not immediately free up the disk space. This is -due to the fact that CockroachDB retains [the ability to query tables -historically](https://www.cockroachlabs.com/blog/time-travel-queries-select-witty_subtitle-the_future/). - -If disk usage is a concern, there are two potential solutions. The -first is to [reduce the time-to-live](configure-replication-zones.html) -(TTL) for the zone, which will cause garbage collection to clean up -deleted rows more frequently. Second, unlike `DELETE`, -[truncate](truncate.html) immediately deletes the entire table, so -consider if you can use `TRUNCATE` instead. - -## Select Performance on Deleted Rows - -Queries that scan across tables that have lots of deleted rows will -have to scan over deletions that have not yet been garbage -collected. Certain database usage patterns that frequently scan over -and delete lots of rows will want to reduce the -[time-to-live](configure-replication-zones.html) values to clean up -deleted rows more frequently. - -## Examples - -### Delete All Rows - -You can delete all rows from a table by not including a `WHERE` clause in your `DELETE` statement. - -~~~ sql -> DELETE FROM account_details; -~~~ -~~~ -DELETE 7 -~~~ - -This is roughly equivalent to [`TRUNCATE`](truncate.html). - -~~~ -> TRUNCATE account_details; -~~~ -~~~ -TRUNCATE -~~~ - -As you can see, one difference is that `TRUNCATE` does not return the number of rows it deleted. - -### Delete Specific Rows - -When deleting specific rows from a table, the most important decision you make is which columns to use in your `WHERE` clause. When making that choice, consider the potential impact of using columns with the [Primary Key](primary-key.html)/[Unique](unique.html) constraints (both of which enforce uniqueness) versus those that are not unique. - -#### Delete Rows Using Primary Key/Unique Columns - -Using columns with the [Primary Key](primary-key.html) or [Unique](unique.html) constraints to delete rows ensures your statement is unambiguous—no two rows contain the same column value, so it's less likely to delete data unintentionally. - -In this example, `account_id` is our primary key and we want to delete the row where it equals 1. Because we're positive no other rows have that value in the `account_id` column, there's no risk of accidentally removing another row. - -~~~ sql -> DELETE FROM account_details WHERE account_id = 1 RETURNING *; -~~~ -~~~ -+------------+---------+--------------+ -| account_id | balance | account_type | -+------------+---------+--------------+ -| 1 | 32000 | Savings | -+------------+---------+--------------+ -~~~ - -#### Delete Rows Using Non-Unique Columns - -Deleting rows using non-unique columns removes _every_ row that returns `TRUE` for the `WHERE` clause's `a_expr`. This can easily result in deleting data you didn't intend to. - -~~~ sql -> DELETE FROM account_details WHERE balance = 30000 RETURNING *; -~~~ -~~~ -+------------+---------+--------------+ -| account_id | balance | account_type | -+------------+---------+--------------+ -| 2 | 30000 | Checking | -| 3 | 30000 | Savings | -+------------+---------+--------------+ -~~~ - -The example statement deleted two rows, which might be unexpected. - -### Return Deleted Rows - -To see which rows your statement deleted, include the `RETURNING` clause to retrieve them using the columns you specify. 
- -#### Use All Columns -By specifying `*`, you retrieve all columns of the delete rows. - -~~~ sql -> DELETE FROM account_details WHERE balance < 23000 RETURNING *; -~~~ -~~~ -+------------+---------+--------------+ -| account_id | balance | account_type | -+------------+---------+--------------+ -| 4 | 22000 | Savings | -+------------+---------+--------------+ -~~~ - -#### Use Specific Columns - -To retrieve specific columns, name them in the `RETURNING` clause. - -~~~ sql -> DELETE FROM account_details WHERE account_id = 5 RETURNING account_id, account_type; -~~~ -~~~ -+------------+--------------+ -| account_id | account_type | -+------------+--------------+ -| 5 | Checking | -+------------+--------------+ -~~~ - -#### Change Column Labels - -When `RETURNING` specific columns, you can change their labels using `AS`. - -~~~ sql -> DELETE FROM account_details WHERE balance < 22500 RETURNING account_id, balance AS final_balance; -~~~ -~~~ -+------------+---------------+ -| account_id | final_balance | -+------------+---------------+ -| 6 | 23500 | -+------------+---------------+ -~~~ - -## See Also - -- [`INSERT`](insert.html) -- [`TRUNCATE`](truncate.html) -- [`ALTER TABLE`](alter-table.html) -- [`DROP TABLE`](drop-table.html) -- [`DROP DATABASE`](drop-database.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/demo-automatic-cloud-migration.md b/src/current/v1.0/demo-automatic-cloud-migration.md deleted file mode 100644 index 9d16ed30bb4..00000000000 --- a/src/current/v1.0/demo-automatic-cloud-migration.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: Cross-Cloud Deployment & Migration -summary: Use a local cluster to simulate migrating from one cloud platform to another. -toc: true ---- - -CockroachDB's flexible [replication controls](configure-replication-zones.html) make it trivially easy to run a single CockroachDB cluster across cloud platforms and to migrate data from one cloud to another without any service interruption. This page walks you through a local simulation of the process. - -## Watch a Live Demo - -{% include_cached youtube.html video_id="cCJkgZy6s2Q" %} - -## Step 1. Install prerequisites - -In this tutorial, you'll use CockroachDB, the HAProxy load balancer, and CockroachDB's version of the YCSB load generator, which requires Go. Before you begin, make sure these applications are installed: - -- Install the latest version of [CockroachDB](install-cockroachdb.html). -- Install [HAProxy](http://www.haproxy.org/). If you're on a Mac and using Homebrew, use `brew install haproxy`. -- Install [Go](https://golang.org/doc/install) version 1.9 or higher. If you're on a Mac and using Homebrew, use `brew install go`. You can check your local version by running `go version`. -- Install the [CockroachDB version of YCSB](https://github.com/cockroachdb/loadgen/tree/master/ycsb): `go get github.com/cockroachdb/loadgen/ycsb` - -Also, to keep track of the data files and logs for your cluster, you may want to create a new directory (e.g., `mkdir cloud-migration`) and start all your nodes in that directory. - -## Step 2. Start a 3-node cluster on "cloud 1" - -If you've already [started a local cluster](start-a-local-cluster.html), the commands for starting nodes should be familiar to you. The new flag to note is [`--locality`](configure-replication-zones.html#descriptive-attributes-assigned-to-nodes), which accepts key-value pairs that describe the topography of a node. 
In this case, you're using the flag to specify that the first 3 nodes are running on cloud 1. - -In a new terminal, start node 1 on cloud 1: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---locality=cloud=1 \ ---store=cloud1node1 \ ---host=localhost \ ---cache=100MB -~~~~ - -In a new terminal, start node 2 on cloud 1: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---locality=cloud=1 \ ---store=cloud1node2 \ ---host=localhost \ ---port=25258 \ ---http-port=8081 \ ---join=localhost:26257 \ ---cache=100MB -~~~ - -In a new terminal, start node 3 on cloud 1: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---locality=cloud=1 \ ---store=cloud1node3 \ ---host=localhost \ ---port=25259 \ ---http-port=8082 \ ---join=localhost:26257 \ ---cache=100MB -~~~ - -## Step 3. Set up HAProxy load balancing - -You're now running 3 nodes in a simulated cloud. Each of these nodes is an equally suitable SQL gateway to your cluster, but to ensure an even balancing of client requests across these nodes, you can use a TCP load balancer. Let's use the open-source [HAProxy](http://www.haproxy.org/) load balancer that you installed earlier. - -In a new terminal, run the [`cockroach gen haproxy`](generate-cockroachdb-resources.html) command, specifying the port of any node: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach gen haproxy --insecure --host=localhost --port=26257 -~~~ - -This command generates an `haproxy.cfg` file automatically configured to work with the 3 nodes of your running cluster. In the file, change `bind :26257` to `bind :26000`. This changes the port on which HAProxy accepts requests to a port that is not already in use by a node and that will not be used by the nodes you'll add later. - -~~~ -global - maxconn 4096 - -defaults - mode tcp - timeout connect 10s - timeout client 1m - timeout server 1m - -listen psql - bind :26000 - mode tcp - balance roundrobin - server cockroach1 localhost:26257 - server cockroach2 localhost:26258 - server cockroach3 localhost:26259 -~~~ - -Start HAProxy, with the `-f` flag pointing to the `haproxy.cfg` file: - -{% include copy-clipboard.html %} -~~~ shell -$ haproxy -f haproxy.cfg -~~~ - -## Step 4. Start a load generator - -Now that you have a load balancer running in front of your cluster, let's use the YCSB load generator that you installed earlier to simulate multiple client connections, each performing mixed read/write workloads. - -In a new terminal, start `ycsb`, pointing it at HAProxy's port: - -{% include copy-clipboard.html %} -~~~ shell -$ $HOME/go/bin/ycsb -duration 20m -tolerate-errors -concurrency 10 -max-rate 1000 'postgresql://root@localhost:26000?sslmode=disable' -~~~ - -This command initiates 10 concurrent client workloads for 20 minutes, but limits the total load to 1000 operations per second (since you're running everything on a single machine). - -## Step 5. Watch data balance across all 3 nodes - -Now open the Admin UI at `http://localhost:8080` and hover over the **SQL Queries** graph at the top. After a minute or so, you'll see that the load generator is executing approximately 95% reads and 5% writes across all nodes: - -CockroachDB Admin UI - -Scroll down a bit and hover over the **Replicas per Node** graph. Because CockroachDB replicates each piece of data 3 times by default, the replica count on each of your 3 nodes should be identical: - -CockroachDB Admin UI - -## Step 6. 
Add 3 nodes on "cloud 2" - -At this point, you're running three nodes on cloud 1. But what if you'd like to start experimenting with resources provided by another cloud vendor? Let's try that by adding three more nodes to a new cloud platform. Again, the flag to note is [`--locality`](configure-replication-zones.html#descriptive-attributes-assigned-to-nodes), which you're using to specify that these next 3 nodes are running on cloud 2. - -In a new terminal, start node 4 on cloud 2: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---locality=cloud=2 \ ---store=cloud2node4 \ ---host=localhost \ ---port=26260 \ ---http-port=8083 \ ---join=localhost:26257 \ ---cache=100MB -~~~ - -In a new terminal, start node 5 on cloud 2: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---locality=cloud=2 \ ---store=cloud2node5 \ ---host=localhost \ ---port=25261 \ ---http-port=8084 \ ---join=localhost:26257 \ ---cache=100MB -~~~ - -In a new terminal, start node 6 on cloud 2: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---locality=cloud=2 \ ---store=cloud2node6 \ ---host=localhost \ ---port=25262 \ ---http-port=8085 \ ---join=localhost:26257 \ ---cache=100MB -~~~ - -## Step 7. Watch data balance across all 6 nodes - -Back in the Admin UI, hover over the **Replicas per Node** graph again. Because you used [`--locality`](configure-replication-zones.html#descriptive-attributes-assigned-to-nodes) to specify that nodes are running on 2 clouds, you'll see an approximately even number of replicas on each node, indicating that CockroachDB has automatically rebalanced replicas across both simulated clouds: - -CockroachDB Admin UI - -Note that it takes a few minutes for the Admin UI to show accurate per-node replica counts on hover. This is why the new nodes in the screenshot above show 0 replicas. However, the graph lines are accurate, and you can click **View node list** in the **Summary** area for accurate per-node replica counts as well. - -## Step 8. Migrate all data to "cloud 2" - -So your cluster is replicating across two simulated clouds. But let's say that after experimentation, you're happy with cloud vendor 2, and you decide that you'd like to move everything there. Can you do that without interruption to your live client traffic? Yes, and it's as simple as running a single command to add a [hard constraint](configure-replication-zones.html#replication-constraints) that all replicas must be on nodes with `--locality=cloud=2`. - -In a new terminal, edit the default replication zone: - -{% include copy-clipboard.html %} -~~~ shell -$ echo 'constraints: [+cloud=2]' | cockroach zone set .default --insecure --host=localhost -f - -~~~ - -## Step 9. Verify the data migration - -Back in the Admin UI, hover over the **Replicas per Node** graph again. Very soon, you'll see the replica count double on nodes 4, 5, and 6 and drop to 0 on nodes 1, 2, and 3: - -CockroachDB Admin UI - -This indicates that all data has been migrated from cloud 1 to cloud 2. In a real cloud migration scenario, at this point you would update the load balancer to point to the nodes on cloud 2 and then stop the nodes on cloud 1. But for the purpose of this local simulation, there's no need to do that. - -## Step 10. Stop the cluster - -Once you're done with your cluster, stop YCSB by switching into its terminal and pressing **CTRL-C**. Then do the same for HAProxy and each CockroachDB node. 
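If you prefer not to interrupt each process by hand, a possible alternative (shown here only as a sketch, using node 1's default port) is to stop each CockroachDB node gracefully with [`cockroach quit`](stop-a-node.html), repeating the command with the `--port` value you passed to each of the other nodes:

~~~ shell
# Gracefully drain and stop the node listening on port 26257:
$ cockroach quit --insecure --port=26257
~~~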
- -{{site.data.alerts.callout_success}}For the last node, the shutdown process will take longer (about a minute) and will eventually force stop the node. This is because, with only 1 node still online, a majority of replicas are no longer available (2 of 3), and so the cluster is not operational. To speed up the process, press CTRL-C a second time.{{site.data.alerts.end}} - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores and the HAProxy config file: - -{% include copy-clipboard.html %} -~~~ shell -$ rm -rf cloud1node1 cloud1node2 cloud1node3 cloud2node4 cloud2node5 cloud2node6 haproxy.cfg -~~~ - -## What's Next? - -Use a local cluster to explore these other core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) - -You may also want to learn other ways to control the location and number of replicas in a cluster: - -- [Even Replication Across Datacenters](configure-replication-zones.html#even-replication-across-datacenters) -- [Multiple Applications Writing to Different Databases](configure-replication-zones.html#multiple-applications-writing-to-different-databases) -- [Stricter Replication for a Specific Table](configure-replication-zones.html#stricter-replication-for-a-specific-table) -- [Tweaking the Replication of System Ranges](configure-replication-zones.html#tweaking-the-replication-of-system-ranges) diff --git a/src/current/v1.0/demo-automatic-rebalancing.md b/src/current/v1.0/demo-automatic-rebalancing.md deleted file mode 100644 index 52de2931a8d..00000000000 --- a/src/current/v1.0/demo-automatic-rebalancing.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Automatic Rebalancing -summary: Use a local cluster to explore how CockroachDB automatically rebalances data as you scale. -toc: true ---- - -This page walks you through a simple demonstration of how CockroachDB automatically rebalances data as you scale. Starting with a 3-node local cluster, you'll lower the maximum size for a single range, the unit of data that is replicated in CockroachDB. You'll then download and run the `block_writer` example program, which continuously inserts data into your cluster, and watch the replica count quickly increase as ranges split. You'll then add 2 more nodes and watch how CockroachDB automatically rebalances replicas to efficiently use all available capacity. - - -## Before You Begin - -In this tutorial, you'll use an example Go program to quickly insert data into a CockroachDB cluster. To run the example program, you must have a [Go environment](http://golang.org/doc/code.html) with a 64-bit version of Go 1.7.1. - -- You can download the [Go binary](http://golang.org/doc/code.html) directly from the official site. -- Be sure to set the `$GOPATH` and `$PATH` environment variables as described [here](https://golang.org/doc/code.html#GOPATH). - -## Step 1. 
Start a 3-node cluster - -{{site.data.alerts.callout_success}}See Start a Local Cluster for details about cockroach start options.{{site.data.alerts.end}} - -~~~ shell -# In a new terminal, start node 1: -$ cockroach start --insecure \ ---store=scale-node1 \ ---host=localhost - -# In a new terminal, start node 2: -$ cockroach start --insecure \ ---store=scale-node2 \ ---host=localhost \ ---port=26258 \ ---http-port=8081 \ ---join=localhost:26257 - -# In a new terminal, start node 3: -$ cockroach start --insecure \ ---store=scale-node3 \ ---host=localhost \ ---port=26259 \ ---http-port=8082 \ ---join=localhost:26257 -~~~ - -In a new terminal, connect the [built-in SQL shell](use-the-built-in-sql-client.html) to any node to verify that the cluster is live: - -~~~ shell -$ cockroach sql --insecure -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -~~~ sql -> SHOW DATABASES; -~~~ - -~~~ -+--------------------+ -| Database | -+--------------------+ -| crdb_internal | -| information_schema | -| pg_catalog | -| system | -+--------------------+ -(4 rows) -~~~ - -Exit the SQL shell: - -~~~ sql -> \q -~~~ - -## Step 2. Lower the max range size - -In CockroachDB, you use [replication zones](configure-replication-zones.html) to control the number and location of replicas. Initially, there is a single default replication zone for the entire cluster that is set to copy each range of data 3 times. This default replication factor is fine for this demo. - -However, the default replication zone also defines the size at which a single range of data spits into two ranges. Since you want to create many ranges quickly and then see how CockroachDB automatically rebalances them, reduce the max range size from the default 67108864 bytes (64MB) to cause ranges to split more quickly: - -~~~ shell -$ echo -e "range_min_bytes: 1\nrange_max_bytes: 262144" | cockroach zone set .default --insecure -f - -~~~ - -~~~ -range_min_bytes: 1 -range_max_bytes: 262144 -gc: - ttlseconds: 86400 -num_replicas: 3 -constraints: [] -~~~ - -## Step 3. Download and run the `block_writer` program - -CockroachDB provides a number of [example programs in Go](https://github.com/cockroachdb/examples-go) for simulating client workloads. The program you'll use for this demonstration is called [`block_writer`](https://github.com/cockroachdb/examples-go/tree/master/block_writer). It will simulate multiple clients inserting data into the cluster. - -Download and install the program: - -~~~ shell -$ go get github.com/cockroachdb/examples-go/block_writer -~~~ - -Then run the program for 1 minute, long enough to generate plenty of ranges: - -~~~ shell -$ block_writer -duration 1m -~~~ - -Once it's running, `block_writer` will output the number of rows written per second: - -~~~ shell - 1s: 776.7/sec 776.7/sec - 2s: 696.3/sec 736.7/sec - 3s: 659.9/sec 711.1/sec - 4s: 557.4/sec 672.6/sec - 5s: 485.0/sec 635.1/sec - 6s: 563.5/sec 623.2/sec - 7s: 725.2/sec 637.7/sec - 8s: 779.2/sec 655.4/sec - 9s: 859.0/sec 678.0/sec -10s: 960.4/sec 706.1/sec -~~~ - -## Step 4. Watch the replica count increase - -Open the Admin UI at `http://localhost:8080`, click **View nodes list** on the right, and you’ll see the bytes, replica count, and other metrics increase as the `block_writer` program inserts data. - -CockroachDB Admin UI - -## Step 5. 
Add 2 more nodes - -Adding capacity is as simple as starting more nodes and joining them to the running cluster: - -~~~ shell -# In a new terminal, start node 4: -$ cockroach start --insecure \ ---store=scale-node4 \ ---host=localhost \ ---port=26260 \ ---http-port=8083 \ ---join=localhost:26257 - -# In a new terminal, start node 5: -$ cockroach start --insecure \ ---store=scale-node5 \ ---host=localhost \ ---port=26261 \ ---http-port=8084 \ ---join=localhost:26257 -~~~ - -## Step 6. Watch data rebalance across all 5 nodes - -Back in the Admin UI, you'll now see 5 nodes listed. At first, the bytes and replica count will be lower for nodes 4 and 5. Very soon, however, you'll see those metrics even out across all nodes, indicating that data has been automatically rebalanced to utilize the additional capacity of the new nodes. - -CockroachDB Admin UI - -## Step 7. Stop the cluster - -Once you're done with your test cluster, stop each node by switching to its terminal and pressing **CTRL-C**. - -{{site.data.alerts.callout_success}}For the last node, the shutdown process will take longer (about a minute) and will eventually force stop the node. This is because, with only 1 node still online, a majority of replicas are no longer available (2 of 3), and so the cluster is not operational. To speed up the process, press CTRL-C a second time.{{site.data.alerts.end}} - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores: - -~~~ shell -$ rm -rf scale-node1 scale-node2 scale-node3 scale-node4 scale-node5 -~~~ - -## What's Next? - -Use a local cluster to explore these other core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/demo-data-replication.md b/src/current/v1.0/demo-data-replication.md deleted file mode 100644 index 7e41b70763f..00000000000 --- a/src/current/v1.0/demo-data-replication.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -title: Data Replication -summary: Use a local cluster to explore how CockroachDB replicates and distributes data. -toc: true ---- - -This page walks you through a simple demonstration of how CockroachDB replicates and distributes data. Starting with a 1-node local cluster, you'll write some data, add 2 nodes, and watch how the data is replicated automatically. You'll then update the cluster to replicate 5 ways, add 2 more nodes, and again watch how all existing replicas are re-replicated to the new nodes. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Start a 1-node cluster - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---store=repdemo-node1 \ ---host=localhost -~~~ - -## Step 2. Write data - -In a new terminal, use the [`cockroach gen`](generate-cockroachdb-resources.html) command to generate an example `intro` database: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach gen example-data intro | cockroach sql --insecure -~~~ - -~~~ -CREATE DATABASE -SET -DROP TABLE -CREATE TABLE -INSERT 1 -INSERT 1 -INSERT 1 -INSERT 1 -... -~~~ - -In the same terminal, open the [built-in SQL shell](use-the-built-in-sql-client.html) and verify that the new `intro` database was added with one table, `mytable`: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -~~~ - -~~~ -# Welcome to the cockroach SQL interface. 
-# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SHOW DATABASES; -~~~ - -~~~ -+--------------------+ -| Database | -+--------------------+ -| information_schema | -| pg_catalog | -| intro | -| system | -+--------------------+ -(4 rows) -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SHOW TABLES FROM intro; -~~~ - -~~~ -+---------+ -| Table | -+---------+ -| mytable | -+---------+ -(1 row) -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM intro.mytable WHERE (l % 2) = 0; -~~~ - -~~~ -+----+-----------------------------------------------------+ -| l | v | -+----+-----------------------------------------------------+ -| 0 | !__aaawwmqmqmwwwaas,,_ .__aaawwwmqmqmwwaaa,, | -| 2 | !"VT?!"""^~~^"""??T$Wmqaa,_auqmWBT?!"""^~~^^""??YV^ | -| 4 | ! "?##mW##?"- | -| 6 | ! C O N G R A T S _am#Z??A#ma, Y | -| 8 | ! _ummY" "9#ma, A | -| 10 | ! vm#Z( )Xmms Y | -| 12 | ! .j####mmm#####mm#m##6. | -| 14 | ! W O W ! jmm###mm######m#mmm##6 | -| 16 | ! ]#me*Xm#m#mm##m#m##SX##c | -| 18 | ! dm#||+*$##m#mm#m#Svvn##m | -| 20 | ! :mmE=|+||S##m##m#1nvnnX##; A | -| 22 | ! :m#h+|+++=Xmm#m#1nvnnvdmm; M | -| 24 | ! Y $#m>+|+|||##m#1nvnnnnmm# A | -| 26 | ! O ]##z+|+|+|3#mEnnnnvnd##f Z | -| 28 | ! U D 4##c|+|+|]m#kvnvnno##P E | -| 30 | ! I 4#ma+|++]mmhvnnvq##P` ! | -| 32 | ! D I ?$#q%+|dmmmvnnm##! | -| 34 | ! T -4##wu#mm#pw##7' | -| 36 | ! -?$##m####Y' | -| 38 | ! !! "Y##Y"- | -| 40 | ! | -+----+-----------------------------------------------------+ -(21 rows) -~~~ - -Exit the SQL shell: - -{% include copy-clipboard.html %} -~~~ sql -> \q -~~~ - -## Step 3. Add two nodes - -In a new terminal, add node 2: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---store=repdemo-node2 \ ---host=localhost \ ---port=26258 \ ---http-port=8081 \ ---join=localhost:26257 -~~~ - -In a new terminal, add node 3: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---store=repdemo-node3 \ ---host=localhost \ ---port=26259 \ ---http-port=8082 \ ---join=localhost:26257 -~~~ - -## Step 4. Watch data replicate to the new nodes - -Open the Admin UI at `http://localhost:8080` and click **View nodes list** on the right. You'll see that all three nodes are listed. At first, the replica count will be lower for nodes 2 and 3. Very soon, the replica count will be identical across all three nodes, indicating that all data in the cluster has been replicated 3 times; there's a copy of every piece of data on each node. - -CockroachDB Admin UI - -## Step 5. Increase the replication factor - -As you just saw, CockroachDB replicates data 3 times by default. Now, in the terminal you used for the built-in SQL shell or in a new terminal, edit the default [replication zone](configure-replication-zones.html) to replicate data 5 times: - -{% include copy-clipboard.html %} -~~~ shell -$ echo 'num_replicas: 5' | cockroach zone set .default --insecure -f - -~~~ - -~~~ -range_min_bytes: 1048576 -range_max_bytes: 67108864 -gc: - ttlseconds: 86400 -num_replicas: 5 -constraints: [] -~~~ - -## Step 6. 
Add two more nodes - -In a new terminal, add node 4: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---host=localhost \ ---store=repdemo-node4 \ ---port=26260 \ ---http-port=8083 \ ---join=localhost:26257 -~~~ - -In a new terminal, add node 5: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---host=localhost \ ---store=repdemo-node5 \ ---port=26261 \ ---http-port=8084 \ ---join=localhost:26257 -~~~ - -## Step 7. Watch data replicate to the new nodes - -Back in the Admin UI, you'll see that there are now 5 nodes listed. Again, at first, the replica count will be lower for nodes 4 and 5. But because you changed the default replication factor to 5, very soon, the replica count will be identical across all 5 nodes, indicating that all data in the cluster has been replicated 5 times. - -CockroachDB Admin UI - -## Step 8. Stop the cluster - -Once you're done with your test cluster, stop each node by switching to its terminal and pressing **CTRL-C**. - -{{site.data.alerts.callout_success}}For the last 2 nodes, the shutdown process will take longer (about a minute) and will eventually force stop the nodes. This is because, with only 2 nodes still online, a majority of replicas are no longer available (3 of 5), and so the cluster is not operational. To speed up the process, press CTRL-C a second time in the nodes' terminals.{{site.data.alerts.end}} - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores: - -{% include copy-clipboard.html %} -~~~ shell -$ rm -rf repdemo-node1 repdemo-node2 repdemo-node3 repdemo-node4 repdemo-node5 -~~~ - -## What's Next? - -Use a local cluster to explore these other core CockroachDB features: - -- [Fault Tolerance & Recovery](demo-fault-tolerance-and-recovery.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/demo-fault-tolerance-and-recovery.md b/src/current/v1.0/demo-fault-tolerance-and-recovery.md deleted file mode 100644 index 2cb9be46540..00000000000 --- a/src/current/v1.0/demo-fault-tolerance-and-recovery.md +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: Fault Tolerance & Recovery -summary: Use a local cluster to explore how CockroachDB remains available during, and recovers after, failure. -toc: true ---- - -This page walks you through a simple demonstration of how CockroachDB remains available during, and recovers after, failure. Starting with a 3-node local cluster, you'll remove a node and see how the cluster continues uninterrupted. You'll then write some data while the node is offline, rejoin the node, and see how it catches up with the rest of the cluster. Finally, you'll add a fourth node, remove a node again, and see how missing replicas eventually re-replicate to the new node. - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. 
Start a 3-node cluster - -{{site.data.alerts.callout_success}}See Start a Local Cluster for details about cockroach start options.{{site.data.alerts.end}} - -~~~ shell -# In a new terminal, start node 1: -$ cockroach start --insecure \ ---store=fault-node1 \ ---host=localhost - -# In a new terminal, start node 2: -$ cockroach start --insecure \ ---store=fault-node2 \ ---host=localhost \ ---port=26258 \ ---http-port=8081 \ ---join=localhost:26257 - -# In a new terminal, start node 3: -$ cockroach start --insecure \ ---store=fault-node3 \ ---host=localhost \ ---port=26259 \ ---http-port=8082 \ ---join=localhost:26257 -~~~ - -## Step 2. Verify that the cluster is live - -In a new terminal, connect the [built-in SQL shell](use-the-built-in-sql-client.html) to any node: - -~~~ shell -$ cockroach sql --insecure -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -~~~ sql -> SHOW DATABASES; -~~~ - -~~~ -+--------------------+ -| Database | -+--------------------+ -| crdb_internal | -| information_schema | -| pg_catalog | -| system | -+--------------------+ -(4 rows) -~~~ - -Exit the SQL shell: - -~~~ sql -> \q -~~~ - -## Step 3. Remove a node temporarily - -In the terminal running node 2, press **CTRL-C** to stop the node. - -Alternatively, you can open a new terminal and run the [`cockroach quit`](stop-a-node.html) command against port `26258`: - -~~~ shell -$ cockroach quit --insecure --port=26258 -~~~ - -~~~ -initiating graceful shutdown of server -ok -~~~ - -## Step 4. Verify that the cluster remains available - -Switch to the terminal for the built-in SQL shell and reconnect the shell to node 1 (port `26257`) or node 3 (port `26259`): - -~~~ shell -$ cockroach sql --insecure --port=26259 -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -~~~ sql -> SHOW DATABASES; -~~~ - -~~~ -+--------------------+ -| Database | -+--------------------+ -| information_schema | -| pg_catalog | -| bank | -| system | -+--------------------+ -(4 rows) -~~~ - -As you see, despite one node being offline, the cluster continues uninterrupted because a majority of replicas (2/3) remains available. If you were to remove another node, however, leaving only one node live, the cluster would be unresponsive until another node was brought back online. - -Exit the SQL shell: - -~~~ sql -> \q -~~~ - -## Step 5. Write data while the node is offline - -In the same terminal, use the [`cockroach gen`](generate-cockroachdb-resources.html) command to generate an example `startrek` database: - -
~~~ shell
$ cockroach gen example-data startrek | cockroach sql --insecure
~~~
- -~~~ -CREATE DATABASE -SET -DROP TABLE -DROP TABLE -CREATE TABLE -INSERT 79 -CREATE TABLE -INSERT 200 -~~~ - -Then reconnect the SQL shell to node 1 (port `26257`) or node 3 (port `26259`) and verify that the new `startrek` database was added with two tables, `episodes` and `quotes`: - -~~~ shell -$ cockroach sql --insecure --port=26259 -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -~~~ sql -> SHOW DATABASES; -~~~ - -~~~ -+--------------------+ -| Database | -+--------------------+ -| crdb_internal | -| information_schema | -| pg_catalog | -| startrek | -| system | -+--------------------+ -(5 rows) -~~~ - -~~~ sql -> SHOW TABLES FROM startrek; -~~~ - -~~~ -+----------+ -| Table | -+----------+ -| episodes | -| quotes | -+----------+ -(2 rows) -~~~ - -~~~ sql -> SELECT * FROM startrek.episodes LIMIT 10; -~~~ - -~~~ -+----+--------+-----+--------------------------------+----------+ -| id | season | num | title | stardate | -+----+--------+-----+--------------------------------+----------+ -| 1 | 1 | 1 | The Man Trap | 1531.1 | -| 2 | 1 | 2 | Charlie X | 1533.6 | -| 3 | 1 | 3 | Where No Man Has Gone Before | 1312.4 | -| 4 | 1 | 4 | The Naked Time | 1704.2 | -| 5 | 1 | 5 | The Enemy Within | 1672.1 | -| 6 | 1 | 6 | Mudd's Women | 1329.8 | -| 7 | 1 | 7 | What Are Little Girls Made Of? | 2712.4 | -| 8 | 1 | 8 | Miri | 2713.5 | -| 9 | 1 | 9 | Dagger of the Mind | 2715.1 | -| 10 | 1 | 10 | The Corbomite Maneuver | 1512.2 | -+----+--------+-----+--------------------------------+----------+ -(10 rows) -~~~ - -Exit the SQL shell: - -~~~ sql -> \q -~~~ - -## Step 6. Rejoin the node to the cluster - -Switch to the terminal for node 2, and rejoin the node to the cluster, using the same command that you used in step 1: - -~~~ shell -$ cockroach start --insecure \ ---store=fault-node2 \ ---host=localhost \ ---port=26258 \ ---http-port=8081 \ ---join=localhost:26257 -~~~ - -~~~ -CockroachDB node starting at {{ now | date: "%Y-%m-%d %H:%M:%S.%6 +0000 UTC" }} -build: CCL {{page.release_info.version}} @ {{page.release_info.build_time}} -admin: http://localhost:8081 -sql: postgresql://root@localhost:26258?sslmode=disable -logs: node2/logs -store[0]: path=fault-node2 -status: restarted pre-existing node -clusterID: {5638ba53-fb77-4424-ada9-8a23fbce0ae9} -nodeID: 2 -~~~ - -## Step 7. Verify that the rejoined node has caught up - -Switch to the terminal for the built-in SQL shell, connect the shell to the rejoined node 2 (port `26258`), and check for the `startrek` data that was added while the node was offline: - -~~~ shell -$ cockroach sql --insecure --port=26258 -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -~~~ sql -> SELECT * FROM startrek.episodes LIMIT 10; -~~~ - -~~~ -+----+--------+-----+--------------------------------+----------+ -| id | season | num | title | stardate | -+----+--------+-----+--------------------------------+----------+ -| 1 | 1 | 1 | The Man Trap | 1531.1 | -| 2 | 1 | 2 | Charlie X | 1533.6 | -| 3 | 1 | 3 | Where No Man Has Gone Before | 1312.4 | -| 4 | 1 | 4 | The Naked Time | 1704.2 | -| 5 | 1 | 5 | The Enemy Within | 1672.1 | -| 6 | 1 | 6 | Mudd's Women | 1329.8 | -| 7 | 1 | 7 | What Are Little Girls Made Of? 
| 2712.4 | -| 8 | 1 | 8 | Miri | 2713.5 | -| 9 | 1 | 9 | Dagger of the Mind | 2715.1 | -| 10 | 1 | 10 | The Corbomite Maneuver | 1512.2 | -+----+--------+-----+--------------------------------+----------+ -(10 rows) -~~~ - -At first, while node 2 is catching up, it acts as a proxy to one of the other nodes with the data. This shows that even when a copy of the data is not local to the node, it has seamless access. - -Soon enough, node 2 catches up entirely. To verify, open the Admin UI at `http://localhost:8080`, click **View nodes list** on the right, and you'll see that all three nodes are listed, and the replica count is identical for each. This means that all data in the cluster has been replicated 3 times; there's a copy of every piece of data on each node. - -{{site.data.alerts.callout_success}}CockroachDB replicates data 3 times by default. You can customize the number and location of replicas for the entire cluster or for specific sets of data using replication zones.{{site.data.alerts.end}} - -CockroachDB Admin UI - -## Step 8. Add another node - -Now, to prepare the cluster for a permanent node failure, open a new terminal and add a fourth node: - -~~~ shell -$ cockroach start --insecure \ ---store=fault-node4 \ ---host=localhost \ ---port=26260 \ ---http-port=8083 \ ---join=localhost:26257 -~~~ - -~~~ -CockroachDB node starting at {{ now | date: "%Y-%m-%d %H:%M:%S.%6 +0000 UTC" }} -build: CCL {{page.release_info.version}} @ {{page.release_info.build_time}} -admin: http://localhost:8083 -sql: postgresql://root@localhost:26260?sslmode=disable -logs: node4/logs -store[0]: path=fault-node4 -status: initialized new node, joined pre-existing cluster -clusterID: {5638ba53-fb77-4424-ada9-8a23fbce0ae9} -nodeID: 4 -~~~ - -## Step 9. Remove a node permanently - -Again, switch to the terminal running node 2 and press **CTRL-C** to stop it. - -Alternatively, you can open a new terminal and run the [`cockroach quit`](stop-a-node.html) command against port `26258`: - -~~~ shell -$ cockroach quit --insecure --port=26258 -~~~ - -~~~ -initiating graceful shutdown of server -ok -server drained and shutdown completed -~~~ - -## Step 10. Verify that the cluster re-replicates missing replicas - -Back in the Admin UI, you'll see 4 nodes listed. After about 1 minute, the dot next to node 2 will turn yellow, indicating that the node is not responding. - -CockroachDB Admin UI - -After about 10 minutes, node 2 will move into a **Dead Nodes** section, indicating that the node is not expected to come back. At this point, in the **Live Nodes** section, you should also see that the **Replicas** count for node 4 matches the count for node 1 and 3, the other live nodes. This indicates that all missing replicas (those that were on node 2) have been re-replicated to node 4. - -CockroachDB Admin UI - -## Step 11. Stop the cluster - -Once you're done with your test cluster, stop each node by switching to its terminal and pressing **CTRL-C**. - -{{site.data.alerts.callout_success}}For the last node, the shutdown process will take longer (about a minute) and will eventually force stop the node. This is because, with only 1 node still online, a majority of replicas are no longer available (2 of 3), and so the cluster is not operational. To speed up the process, press CTRL-C a second time.{{site.data.alerts.end}} - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores: - -~~~ shell -$ rm -rf fault-node1 fault-node2 fault-node3 fault-node4 fault-node5 -~~~ - -## What's Next? 
- -Use a local cluster to explore these other core CockroachDB features: - -- [Data Replication](demo-data-replication.html) -- [Automatic Rebalancing](demo-automatic-rebalancing.html) -- [Automatic Cloud Migration](demo-automatic-cloud-migration.html) diff --git a/src/current/v1.0/deploy-cockroachdb-on-aws-insecure.md b/src/current/v1.0/deploy-cockroachdb-on-aws-insecure.md deleted file mode 100644 index 4b8e5f6e598..00000000000 --- a/src/current/v1.0/deploy-cockroachdb-on-aws-insecure.md +++ /dev/null @@ -1,293 +0,0 @@ ---- -title: Deploy CockroachDB on AWS EC2 (Insecure) -summary: Learn how to deploy CockroachDB on Amazon's AWS EC2 platform. -toc: true -toc_not_nested: true ---- - - - -This page shows you how to manually deploy an insecure multi-node CockroachDB cluster on Amazon's AWS EC2 platform, using AWS's managed load balancing service to distribute client traffic. - -{{site.data.alerts.callout_danger}}If you plan to use CockroachDB in production, we strongly recommend using a secure cluster instead. Select Secure above for instructions.{{site.data.alerts.end}} - - -## Requirements - -You must have SSH access ([key pairs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)/[SSH login](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html)) to each machine with root or sudo privileges. This is necessary for distributing binaries and starting CockroachDB. - -## Recommendations - -- If you plan to use CockroachDB in production, we recommend using a [secure cluster](deploy-cockroachdb-on-aws.html) instead. Using an insecure cluster comes with risks: - - Your cluster is open to any client that can access any node's IP addresses. - - Any user, even `root`, can log in without providing a password. - - Any user, connecting as `root`, can read or write any data in your cluster. - - There is no network encryption or authentication, and thus no confidentiality. - -- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). - -- All instances running CockroachDB should be members of the same Security Group. - -- Decide how you want to access your Admin UI: - - Only from specific IP addresses, which requires you to set firewall rules to allow communication on port `8080` *(documented on this page)* - - Using an SSH tunnel, which requires you to use `--http-host=localhost` when starting your nodes - -## Step 1. Configure your network - -CockroachDB requires TCP communication on two ports: - -- `26257` for inter-node communication (i.e., working as a cluster), for applications to connect to the load balancer, and for routing from the load balancer to nodes -- `8080` for exposing your Admin UI - -You can create these rules using [Security Groups' Inbound Rules](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html#adding-security-group-rule). 
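If you manage your Security Group with the AWS CLI rather than the console, inbound rules equivalent to the tables below might be created along the following lines. This is only a sketch: it assumes the AWS CLI is installed and configured, and the group ID and CIDR ranges are placeholders to replace with your own values.

~~~ shell
# Inter-node and load balancer-node communication on 26257, limited to the security group itself:
$ aws ec2 authorize-security-group-ingress --group-id sg-07ab277a --protocol tcp --port 26257 --source-group sg-07ab277a

# Admin UI on 8080, limited to your network's IP ranges:
$ aws ec2 authorize-security-group-ingress --group-id sg-07ab277a --protocol tcp --port 8080 --cidr 203.0.113.0/24

# Application connections on 26257, limited to your application's IP ranges:
$ aws ec2 authorize-security-group-ingress --group-id sg-07ab277a --protocol tcp --port 26257 --cidr 198.51.100.0/24
~~~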
- -#### Inter-node and load balancer-node communication - -| Field | Recommended Value | -|-------|-------------------| -| Type | Custom TCP Rule | -| Protocol | TCP | -| Port Range | **26257** | -| Source | The name of your security group (e.g., *sg-07ab277a*) | - -#### Admin UI - -| Field | Recommended Value | -|-------|-------------------| -| Type | Custom TCP Rule | -| Protocol | TCP | -| Port Range | **8080** | -| Source | Your network's IP ranges | - -#### Application data - -| Field | Recommended Value | -|-------|-------------------| -| Type | Custom TCP Rules | -| Protocol | TCP | -| Port Range | **26257** | -| Source | Your application's IP ranges | - -## Step 2. Create instances - -[Create an instance](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/launching-instance.html) for each node you plan to have in your cluster. We [recommend](recommended-production-settings.html#cluster-topology): - -- Running at least 3 CockroachDB nodes to ensure survivability. -- Selecting the same continent for all of your instances for best performance. - -## Step 3. Set up load balancing - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes. - -AWS offers fully-managed load balancing to distribute traffic between instances. - -1. [Add AWS load balancing](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-increase-availability.html). Be sure to: - - Set forwarding rules to route TCP traffic from the load balancer's port **26257** to port **26257** on the node Droplets. - - Configure health checks to use HTTP port **8080** and path `/health`. -2. Note the provisioned **IP Address** for the load balancer. You'll use this later to test load balancing and to connect your application to the cluster. - -{{site.data.alerts.callout_info}}If you would prefer to use HAProxy instead of AWS's managed load balancing, see Manual Deployment for guidance.{{site.data.alerts.end}} - -## Step 4. Start the first node - -1. SSH to your instance: - - ~~~ shell - $ ssh -i @ - ~~~ - -2. Install the latest CockroachDB binary: - - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new CockroachDB cluster with a single node, which will communicate with other nodes on its internal IP address: - - ~~~ shell - $ cockroach start --insecure \ - --background - ~~~ - -## Step 5. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by setting up additional nodes that will join the cluster. - -1. SSH to another instance: - - ~~~ - $ ssh -i @ - ~~~ - -2. Install CockroachDB from our latest binary: - - ~~~ shell - # Get the latest CockroachDB tarball. 
- $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's internal IP address: - - ~~~ shell - $ cockroach start --insecure \ - --background \ - --join=:26257 - ~~~ - -4. Repeat these steps for each instance you want to use as a node. - -## Step 6. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. SSH to your first node: - - ~~~ shell - $ ssh -i @ - ~~~ - -2. Launch the built-in SQL client and create a database: - - ~~~ shell - $ cockroach sql --insecure - ~~~ - ~~~ sql - > CREATE DATABASE insecurenodetest; - ~~~ - -3. In another terminal window, SSH to another node: - - ~~~ shell - $ ssh -i @ - ~~~ - -4. Launch the built-in SQL client: - - ~~~ shell - $ cockroach sql --insecure - ~~~ - -5. View the cluster's databases, which will include `insecurenodetest`: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 7. Test load balancing - -The AWS load balancer created in [step 3](#step-3-set-up-load-balancing) can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients can connect to the load balancer, which will then redirect the connection to a CockroachDB node. - -To test this, install CockroachDB locally and use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if it's not there already. - -2. Launch the built-in SQL client, with the `--host` flag set to the load balancer's IP address: - - ~~~ shell - $ cockroach sql --insecure \ - --host= \ - --port=26257 - ~~~ - -3. View the cluster's databases: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, the load balancer redirected the query to one of the CockroachDB nodes. - -4. Check which node you were redirected to: - - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - ~~~ - +---------+ - | node_id | - +---------+ - | 3 | - +---------+ - (1 row) - ~~~ - -5. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Monitor the cluster - -View your cluster's Admin UI by going to `http://:8080`. - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - -2. Click the **Databases** tab on the left to verify that `insecurenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## Step 9. 
Use the database - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the AWS load balancer, not to a CockroachDB node. - -## See Also - -- [Google Cloud Platform GCE Deployment](deploy-cockroachdb-on-google-cloud-platform.html) -- [Digital Ocean Deployment](deploy-cockroachdb-on-digital-ocean.html) -- [Azure Deployment](deploy-cockroachdb-on-microsoft-azure.html) -- [Manual Deployment](manual-deployment.html) -- [Orchestration](orchestration.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/deploy-cockroachdb-on-aws.md b/src/current/v1.0/deploy-cockroachdb-on-aws.md deleted file mode 100644 index 92d4c0d5482..00000000000 --- a/src/current/v1.0/deploy-cockroachdb-on-aws.md +++ /dev/null @@ -1,451 +0,0 @@ ---- -title: Deploy CockroachDB on AWS EC2 -summary: Learn how to deploy CockroachDB on Amazon's AWS EC2 platform. -toc: true -toc_not_nested: true ---- - -
- - -
- -This page shows you how to manually deploy a secure multi-node CockroachDB cluster on Amazon's AWS EC2 platform, using AWS's managed load balancing service to distribute client traffic. - -If you are only testing CockroachDB, or you are not concerned with protecting network communication with TLS encryption, you can use an insecure cluster instead. Select **Insecure** above for instructions. - - -## Requirements - -- Locally, you must have [CockroachDB installed](install-cockroachdb.html), which you'll use to generate and manage your deployment's certificates. - -- In AWS, you must have SSH access ([key pairs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)/[SSH login](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html)) to each machine with root or sudo privileges. This is necessary for distributing binaries and starting CockroachDB. - -## Recommendations - -- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). - -- All instances running CockroachDB should be members of the same Security Group. - -- Decide how you want to access your Admin UI: - - Only from specific IP addresses, which requires you to set firewall rules to allow communication on port `8080` *(documented on this page)* - - Using an SSH tunnel, which requires you to use `--http-host=localhost` when starting your nodes - -## Step 1. Configure your network - -CockroachDB requires TCP communication on two ports: - -- `26257` for inter-node communication (i.e., working as a cluster), for applications to connect to the load balancer, and for routing from the load balancer to nodes -- `8080` for exposing your Admin UI - -You can create these rules using [Security Groups' Inbound Rules](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html#adding-security-group-rule). - -#### Inter-node and load balancer-node communication - -| Field | Recommended Value | -|-------|-------------------| -| Type | Custom TCP Rule | -| Protocol | TCP | -| Port Range | **26257** | -| Source | The name of your security group (e.g., *sg-07ab277a*) | - -#### Admin UI - -| Field | Recommended Value | -|-------|-------------------| -| Type | Custom TCP Rule | -| Protocol | TCP | -| Port Range | **8080** | -| Source | Your network's IP ranges | - -#### Application data - -| Field | Recommended Value | -|-------|-------------------| -| Type | Custom TCP Rules | -| Protocol | TCP | -| Port Range | **26257** | -| Source | Your application's IP ranges | - -## Step 2. Create instances - -[Create an instance](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/launching-instance.html) for each node you plan to have in your cluster. We [recommend](recommended-production-settings.html#cluster-topology): - -- Running at least 3 nodes to ensure survivability. -- Selecting the same continent for all of your instances for best performance. - -## Step 3. Set up load balancing - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. 
In cases where a node fails, the load balancer redirects client traffic to available nodes. - -AWS offers fully-managed load balancing to distribute traffic between instances. - -1. [Add AWS load balancing](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-increase-availability.html). Be sure to: - - Set forwarding rules to route TCP traffic from the load balancer's port **26257** to port **26257** on the node Droplets. - - Configure health checks to use HTTP port **8080** and path `/health`. -2. Note the provisioned **IP Address** for the load balancer. You'll use this later to test load balancing and to connect your application to the cluster. - -{{site.data.alerts.callout_info}}If you would prefer to use HAProxy instead of AWS's managed load balancing, see Manual Deployment for guidance.{{site.data.alerts.end}} - -## Step 4. Generate certificates - -Locally, you'll need to [create the following certificates and keys](create-security-certificates.html): - -- A certificate authority (CA) key pair (`ca.crt` and `ca.key`). -- A node key pair for each node, issued to its IP addresses and any common names the machine uses, as well as to the IP address provisioned for the AWS load balancer (`node.crt` and `node.key`) -- A client key pair for the `root` user (`client.root.crt` and `client.root.key`). - -{{site.data.alerts.callout_success}}Before beginning, it's useful to collect each of your machine's internal and external IP addresses, as well as any server names you want to issue certificates for.{{site.data.alerts.end}} - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if you haven't already. - -2. Create two directories: - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir certs - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir my-safe-directory - ~~~ - - `certs`: You'll generate your CA certificate and all node and client certificates and keys in this directory and then upload some of the files to your nodes. - - `my-safe-directory`: You'll generate your CA key in this directory and then reference the key when generating node and client certificates. After that, you'll keep the key safe and secret; you will not upload it to your nodes. - -3. Create the CA certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-ca \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -4. Create the certificate and key for the first node, issued to all common names you might use to refer to the node as well as to addresses provisioned for the AWS load balancer: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - `` which is the instance's **Internal IP**. - - `` which is the instance's **External IP address**. - - `` which is the instance's hostname. You can find this by SSHing into a server and running `hostname`. For many AWS EC2 servers, this is `ip-` followed by the internal IP address delimited by dashes; e.g., `ip-172-31-18-168`. - - `` which include any domain names you point to the instance. - - `localhost` and `127.0.0.1` - - `` - - `` - -5. 
Upload certificates to the first node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh -i @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp -i \ - certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -6. Delete the local copy of the node certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ rm certs/node.crt certs/node.key - ~~~ - - {{site.data.alerts.callout_info}}This is necessary because the certificates and keys for additional nodes will also be named node.crt and node.key As an alternative to deleting these files, you can run the next cockroach cert create-node commands with the --overwrite flag.{{site.data.alerts.end}} - -7. Create the certificate and key for the second node, issued to all common names you might use to refer to the node as well as to addresses provisioned for the AWS load balancer: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -8. Upload certificates to the second node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh -i @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp -i \ - certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -9. Repeat steps 6 - 8 for each additional node. - -10. Create a client certificate and key for the `root` user: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-client \ - root \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {{site.data.alerts.callout_success}}In later steps, you'll use the root user's certificate to run cockroach client commands from your local machine. If you might also want to run cockroach client commands directly on a node (e.g., for local debugging), you'll need to copy the root user's certificate and key to that node as well.{{site.data.alerts.end}} - -## Step 5. Start the first node - -1. SSH to your instance: - - {% include copy-clipboard.html %} - ~~~ shell - $ ssh -i @ - ~~~ - -2. Install the latest CockroachDB binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new CockroachDB cluster with a single node, specifying the location of certificates and the address at which other nodes can reach it: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --background \ - --certs-dir=certs \ - --advertise-host= - ~~~ - -## Step 6. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by setting up additional nodes that will join the cluster. - -1. SSH to your instance: - - {% include copy-clipboard.html %} - ~~~ - $ ssh -i @ - ~~~ - -2. 
Install CockroachDB from our latest binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's internal IP address: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --background \ - --certs-dir=certs \ - --advertise-host= \ - --join=:26257 - ~~~ - -4. Repeat these steps for each instance you want to use as a node. - -## Step 7. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, connect the built-in SQL client to node 1, with the `--host` flag set to the external IP address of node 1 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. Create a `securenodetest` database: - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE DATABASE securenodetest; - ~~~ - -3. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -4. Connect the built-in SQL client to node 2, with the `--host` flag set to the external IP address of node 2 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -5. View the cluster's databases, which will include `securenodetest`: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Test load balancing - -The AWS load balancer created in [step 3](#step-3-set-up-load-balancing) can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients can connect to the load balancer, which will then redirect the connection to a CockroachDB node. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. Launch the built-in SQL client, with the `--host` flag set to the load balancer's IP address: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. View the cluster's databases: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, the load balancer redirected the query to one of the CockroachDB nodes. - -3. 
Check which node you were redirected to: - - {% include copy-clipboard.html %} - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - - ~~~ - +---------+ - | node_id | - +---------+ - | 3 | - +---------+ - (1 row) - ~~~ - -4. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 9. Monitor the cluster - -View your cluster's Admin UI by going to `https://:8080`. - -{{site.data.alerts.callout_info}}Note that your browser will consider the CockroachDB-created certificate invalid; you’ll need to click through a warning message to get to the UI.{{site.data.alerts.end}} - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - -2. Click the **Databases** tab on the left to verify that `securenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## Step 10. Use the database - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the AWS load balancer, not to a CockroachDB node. - -## See Also - -- [Google Cloud Platform GCE Deployment](deploy-cockroachdb-on-google-cloud-platform.html) -- [Digital Ocean Deployment](deploy-cockroachdb-on-digital-ocean.html) -- [Azure Deployment](deploy-cockroachdb-on-microsoft-azure.html) -- [Manual Deployment](manual-deployment.html) -- [Orchestration](orchestration.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/deploy-cockroachdb-on-digital-ocean-insecure.md b/src/current/v1.0/deploy-cockroachdb-on-digital-ocean-insecure.md deleted file mode 100644 index c4eb8355d45..00000000000 --- a/src/current/v1.0/deploy-cockroachdb-on-digital-ocean-insecure.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -title: Deploy CockroachDB on Digital Ocean (Insecure) -summary: Learn how to deploy a CockroachDB cluster on Digital Ocean. -toc: true -toc_not_nested: true ---- - - - -This page shows you how to manually deploy an insecure multi-node CockroachDB cluster on Digital Ocean, using Digital Ocean's managed load balancing service to distribute client traffic. - -{{site.data.alerts.callout_danger}}If you plan to use CockroachDB in production, we strongly recommend using a secure cluster instead. Select Secure above for instructions.{{site.data.alerts.end}} - - -## Requirements - -You must have [SSH access](https://www.digitalocean.com/community/tutorials/how-to-connect-to-your-droplet-with-ssh) to each Droplet with root or sudo privileges. This is necessary for distributing binaries and starting CockroachDB. - -## Recommendations - -- If you plan to use CockroachDB in production, we recommend using a [secure cluster](deploy-cockroachdb-on-digital-ocean.html) instead. Using an insecure cluster comes with risks: - - Your cluster is open to any client that can access any node's IP addresses. - - Any user, even `root`, can log in without providing a password. - - Any user, connecting as `root`, can read or write any data in your cluster. - - There is no network encryption or authentication, and thus no confidentiality. - -- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). 
- -- Set up your Droplets using [private networking](https://docs.digitalocean.com/products/networking/vpc/how-to/create/). - -- Decide how you want to access your Admin UI: - - Only from specific IP addresses, which requires you to set firewall rules to allow communication on port `8080` *(documented on this page)*. - - Using an SSH tunnel, which requires you to use `--http-host=localhost` when starting your nodes. - - -## Step 1. Create Droplets - -[Create Droplets with private networking](https://docs.digitalocean.com/products/networking/vpc/how-to/create/) for each node you plan to have in your cluster. We [recommend](recommended-production-settings.html#cluster-topology): - -- Running at least 3 nodes to ensure survivability. -- Selecting the same continent for all of your Droplets for best performance. - -## Step 2. Set up load balancing - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes. - -Digital Ocean offers fully-managed load balancers to distribute traffic between Droplets. - -1. [Create a Digital Ocean Load Balancer](https://www.digitalocean.com/community/tutorials/an-introduction-to-digitalocean-load-balancers). Be sure to: - - Set forwarding rules to route TCP traffic from the load balancer's port **26257** to port **26257** on the node Droplets. - - Configure health checks to use HTTP port **8080** and path `/health`. -2. Note the provisioned **IP Address** for the load balancer. You'll use this later to test load balancing and to connect your application to the cluster. - -{{site.data.alerts.callout_info}}If you would prefer to use HAProxy instead of Digital Ocean's managed load balancing, see Manual Deployment for guidance.{{site.data.alerts.end}} - -## Step 3. Configure your network - -Set up a firewall for each of your Droplets, allowing TCP communication on the following two ports: - -- **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster), for applications to connect to the load balancer, and for routing from the load balancer to nodes -- **8080** (`tcp:8080`) for exposing your Admin UI - -For guidance, you can use Digital Ocean's guide to configuring firewalls based on the Droplet's OS: - -- Ubuntu and Debian can use [`ufw`](https://www.digitalocean.com/community/tutorials/how-to-setup-a-firewall-with-ufw-on-an-ubuntu-and-debian-cloud-server). -- FreeBSD can use [`ipfw`](https://www.digitalocean.com/community/tutorials/recommended-steps-for-new-freebsd-10-1-servers). -- Fedora can use [`iptables`](https://www.digitalocean.com/community/tutorials/initial-setup-of-a-fedora-22-server). -- CoreOS can use [`iptables`](https://www.digitalocean.com/community/tutorials/how-to-secure-your-coreos-cluster-with-tls-ssl-and-firewall-rules). -- CentOS can use [`firewalld`](https://www.digitalocean.com/community/tutorials/how-to-set-up-a-firewall-using-firewalld-on-centos-7). - -## Step 4. Start the first node - -1. SSH to your Droplet: - - ~~~ shell - $ ssh @ - ~~~ - -2. 
Install CockroachDB from our latest binary: - - ~~~ shell - # Get the latest CockroachDB tarball: - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary: - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary: - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new CockroachDB cluster with a single node, which will communicate with other nodes on its internal IP address: - - ~~~ shell - $ cockroach start --insecure \ - --background \ - --advertise-host= - ~~~ - -## Step 5. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by setting up additional nodes that will join the cluster. - -1. SSH to your Droplet: - - ~~~ - $ ssh @ - ~~~ - -2. Install CockroachDB from our latest binary: - - ~~~ shell - # Get the latest CockroachDB tarball: - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary: - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary: - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's internal IP address: - - ~~~ shell - $ cockroach start --insecure \ - --background \ - --advertise-host= \ - --join=:26257 - ~~~ - -4. Repeat these steps for each Droplet you want to use as a node. - -## Step 6. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. SSH to your first node: - - ~~~ shell - $ ssh @ - ~~~ - -2. Launch the built-in SQL client and create a database: - - ~~~ shell - $ cockroach sql --insecure - ~~~ - ~~~ sql - > CREATE DATABASE insecurenodetest; - ~~~ - -3. In another terminal window, SSH to another node: - - ~~~ shell - $ ssh @ - ~~~ - -4. Launch the built-in SQL client: - - ~~~ shell - $ cockroach sql --insecure - ~~~ - -5. View the cluster's databases, which will include `insecurenodetest`: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 7. Test load balancing - -The Digital Ocean Load Balancer created in [step 2](#step-2-set-up-load-balancing) can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients can connect to the load balancer, which will then redirect the connection to a CockroachDB node. - -To test this, install CockroachDB locally and use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if it's not there already. - -2. Launch the built-in SQL client, with the `--host` flag set to the load balancer's IP address: - - ~~~ shell - $ cockroach sql --insecure \ - --host= \ - --port=26257 - ~~~ - -3. 
View the cluster's databases: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, the load balancer redirected the query to one of the CockroachDB nodes. - -4. Check which node you were redirected to: - - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - ~~~ - +---------+ - | node_id | - +---------+ - | 3 | - +---------+ - (1 row) - ~~~ - -5. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Monitor the cluster - -View your cluster's Admin UI by going to `http://:8080`. - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - - Also check the **Replicas** column. If you have nodes with 0 replicas, it's possible you didn't properly set the `--advertise-host` flag to the Droplet's internal IP address. This prevents the node from receiving replicas and working as part of the cluster. - -2. Click the **Databases** tab on the left to verify that `insecurenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## Step 9. Use the database - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the Digital Ocean Load Balancer, not to a CockroachDB node. - -## See Also - -- [Google Cloud GCE Deployment](deploy-cockroachdb-on-google-cloud-platform.html) -- [AWS Deployment](deploy-cockroachdb-on-aws.html) -- [Azure Deployment](deploy-cockroachdb-on-microsoft-azure.html) -- [Manual Deployment](manual-deployment.html) -- [Orchestration](orchestration.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/deploy-cockroachdb-on-digital-ocean.md b/src/current/v1.0/deploy-cockroachdb-on-digital-ocean.md deleted file mode 100644 index f59f9332f09..00000000000 --- a/src/current/v1.0/deploy-cockroachdb-on-digital-ocean.md +++ /dev/null @@ -1,430 +0,0 @@ ---- -title: Deploy CockroachDB on Digital Ocean -summary: Learn how to deploy a CockroachDB cluster on Digital Ocean. -toc: true -toc_not_nested: true ---- - -
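For the create-users step in the insecure Digital Ocean guide above, a minimal sketch, assuming this release's `cockroach user set` subcommand and the `--execute` flag of the built-in SQL client; the username `maxroach` is hypothetical and the load balancer address is a placeholder:

~~~ shell
# Create a SQL user through the load balancer (hypothetical username and address).
$ cockroach user set maxroach --insecure --host=<load balancer IP address>

# Grant the new user privileges on the test database created earlier.
$ cockroach sql --insecure --host=<load balancer IP address> \
    --execute="GRANT SELECT, INSERT ON DATABASE insecurenodetest TO maxroach"
~~~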
-
-This page shows you how to manually deploy a secure multi-node CockroachDB cluster on Digital Ocean, using Digital Ocean's managed load balancing service to distribute client traffic.
-
-If you are only testing CockroachDB, or you are not concerned with protecting network communication with TLS encryption, you can use an insecure cluster instead. Select **Insecure** above for instructions.
-
-
-## Requirements
-
-- Locally, you must have [CockroachDB installed](install-cockroachdb.html), which you'll use to generate and manage your deployment's certificates.
-
-- In Digital Ocean, you must have [SSH access](https://www.digitalocean.com/community/tutorials/how-to-connect-to-your-droplet-with-ssh) to each Droplet with root or sudo privileges. This is necessary for distributing binaries and starting CockroachDB.
-
-## Recommendations
-
-- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html).
-
-- Set up your Droplets using [private networking](https://docs.digitalocean.com/products/networking/vpc/how-to/create/).
-
-- Decide how you want to access your Admin UI:
-  - Only from specific IP addresses, which requires you to set firewall rules to allow communication on port `8080` *(documented on this page)*.
-  - Using an SSH tunnel, which requires you to use `--http-host=localhost` when starting your nodes.
-
-## Step 1. Create Droplets
-
-[Create Droplets with private networking](https://docs.digitalocean.com/products/networking/vpc/how-to/create/) for each node you plan to have in your cluster. We [recommend](recommended-production-settings.html#cluster-topology):
-
-- Running at least 3 nodes to ensure survivability.
-- Selecting the same continent for all of your Droplets for best performance.
-
-## Step 2. Set up load balancing
-
-Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing:
-
-- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second).
-
-- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes.
-
-Digital Ocean offers fully-managed load balancers to distribute traffic between Droplets.
-
-1. [Create a Digital Ocean Load Balancer](https://www.digitalocean.com/community/tutorials/an-introduction-to-digitalocean-load-balancers). Be sure to:
-   - Set forwarding rules to route TCP traffic from the load balancer's port **26257** to port **26257** on the node Droplets.
-   - Configure health checks to use HTTP port **8080** and path `/health`.
-2. Note the provisioned **IP Address** for the load balancer. You'll use this later to test load balancing and to connect your application to the cluster.
-
-{{site.data.alerts.callout_info}}If you would prefer to use HAProxy instead of Digital Ocean's managed load balancing, see Manual Deployment for guidance.{{site.data.alerts.end}}
-
-## Step 3. 
Configure your network - -Set up a firewall for each of your Droplets, allowing TCP communication on the following two ports: - -- **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster), for applications to connect to the load balancer, and for routing from the load balancer to nodes -- **8080** (`tcp:8080`) for exposing your Admin UI - -For guidance, you can use Digital Ocean's guide to configuring firewalls based on the Droplet's OS: - -- Ubuntu and Debian can use [`ufw`](https://www.digitalocean.com/community/tutorials/how-to-setup-a-firewall-with-ufw-on-an-ubuntu-and-debian-cloud-server). -- FreeBSD can use [`ipfw`](https://www.digitalocean.com/community/tutorials/recommended-steps-for-new-freebsd-10-1-servers). -- Fedora can use [`iptables`](https://www.digitalocean.com/community/tutorials/initial-setup-of-a-fedora-22-server). -- CoreOS can use [`iptables`](https://www.digitalocean.com/community/tutorials/how-to-secure-your-coreos-cluster-with-tls-ssl-and-firewall-rules). -- CentOS can use [`firewalld`](https://www.digitalocean.com/community/tutorials/how-to-set-up-a-firewall-using-firewalld-on-centos-7). - -## Step 4. Generate certificates - -Locally, you'll need to [create the following certificates and keys](create-security-certificates.html): - -- A certificate authority (CA) key pair (`ca.crt` and `ca.key`). -- A node key pair for each node, issued to its IP addresses and any common names the machine uses, as well as to the IP address provisioned for the Digital Ocean Load Balancer. -- A client key pair for the `root` user. - -{{site.data.alerts.callout_success}}Before beginning, it's useful to collect each of your machine's internal and external IP addresses, as well as any server names you want to issue certificates for.{{site.data.alerts.end}} - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if you haven't already. - -2. Create two directories: - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir certs - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir my-safe-directory - ~~~ - - `certs`: You'll generate your CA certificate and all node and client certificates and keys in this directory and then upload some of the files to your nodes. - - `my-safe-directory`: You'll generate your CA key in this directory and then reference the key when generating node and client certificates. After that, you'll keep the key safe and secret; you will not upload it to your nodes. - -3. Create the CA certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-ca \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -4. Create the certificate and key for the first node, issued to all common names you might use to refer to the node as well as to addresses provisioned for the Digital Ocean Load Balancer: - - ``, which is the node Droplet's **Private IP**. - - ``, which is the node Droplet's **ipv4** address. - - ``, which is the node Droplet's **Name**. - - ``, which include any domain names you point to the node Droplet. - - `localhost` and `127.0.0.1` - - ``, which is the Digital Ocean Load Balancer's provisioned **IP Address**. - - ``, which is the Digital Ocean Load Balancer's **Name**. - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -5. 
Upload certificates to the first node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -6. Delete the local copy of the node certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ rm certs/node.crt certs/node.key - ~~~ - - {{site.data.alerts.callout_info}}This is necessary because the certificates and keys for additional nodes will also be named node.crt and node.key As an alternative to deleting these files, you can run the next cockroach cert create-node commands with the --overwrite flag.{{site.data.alerts.end}} - -7. Create the certificate and key for the second node, issued to all common names you might use to refer to the node as well as to addresses provisioned for the Digital Ocean Load Balancer: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -8. Upload certificates to the second node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -9. Repeat steps 6 - 8 for each additional node. - -10. Create a client certificate and key for the `root` user: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-client \ - root \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {{site.data.alerts.callout_success}}In later steps, you'll use the root user's certificate to run cockroach client commands from your local machine. If you might also want to run cockroach client commands directly on a node (e.g., for local debugging), you'll need to copy the root user's certificate and key to that node as well.{{site.data.alerts.end}} - -## Step 5. Start the first node - -1. SSH to your Droplet: - - {% include copy-clipboard.html %} - ~~~ shell - $ ssh @ - ~~~ - -2. Install the latest CockroachDB binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new CockroachDB cluster with a single node, specifying the location of certificates and the address at which other nodes can reach it: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --background \ - --certs-dir=certs \ - --advertise-host= - ~~~ - -## Step 6. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by setting up additional nodes that will join the cluster. - -1. SSH to your Droplet: - - {% include copy-clipboard.html %} - ~~~ - $ ssh @ - ~~~ - -2. 
Install the latest CockroachDB binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's internal IP address: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --background \ - --certs-dir=certs \ - --advertise-host= \ - --join=:26257 - ~~~ - -4. Repeat these steps for each Droplet you want to use as a node. - -## Step 7. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, connect the built-in SQL client to node 1, with the `--host` flag set to the address of node 1 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. Create a `securenodetest` database: - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE DATABASE securenodetest; - ~~~ - -3. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -4. Connect the built-in SQL client to node 2, with the `--host` flag set to the address of node 2 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -5. View the cluster's databases, which will include `securenodetest`: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Test load balancing - -The Digital Ocean Load Balancer created in [step 2](#step-2-set-up-load-balancing) can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients can connect to the load balancer, which will then redirect the connection to a CockroachDB node. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, launch the built-in SQL client, with the `--host` flag set to the load balancer's IP address and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. 
View the cluster's databases: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, the load balancer redirected the query to one of the CockroachDB nodes. - -3. Check which node you were redirected to: - - {% include copy-clipboard.html %} - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - - ~~~ - +---------+ - | node_id | - +---------+ - | 3 | - +---------+ - (1 row) - ~~~ - -4. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 9. Monitor the cluster - -View your cluster's Admin UI by going to `https://:8080`. - -{{site.data.alerts.callout_info}}Note that your browser will consider the CockroachDB-created certificate invalid; you’ll need to click through a warning message to get to the UI.{{site.data.alerts.end}} - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - - Also check the **Replicas** column. If you have nodes with 0 replicas, it's possible you didn't properly set the `--advertise-host` flag to the Droplet's internal IP address. This prevents the node from receiving replicas and working as part of the cluster. - -2. Click the **Databases** tab on the left to verify that `securenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## Step 10. Use the database - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the Digital Ocean Load Balancer, not to a CockroachDB node. - -## See Also - -- [Google Cloud GCE Deployment](deploy-cockroachdb-on-google-cloud-platform.html) -- [AWS Deployment](deploy-cockroachdb-on-aws.html) -- [Azure Deployment](deploy-cockroachdb-on-microsoft-azure.html) -- [Manual Deployment](manual-deployment.html) -- [Orchestration](orchestration.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/deploy-cockroachdb-on-google-cloud-platform-insecure.md b/src/current/v1.0/deploy-cockroachdb-on-google-cloud-platform-insecure.md deleted file mode 100644 index f6eca04676f..00000000000 --- a/src/current/v1.0/deploy-cockroachdb-on-google-cloud-platform-insecure.md +++ /dev/null @@ -1,291 +0,0 @@ ---- -title: Deploy CockroachDB on Google Cloud Platform GCE (Insecure) -summary: Learn how to deploy CockroachDB on Google Cloud Platform's Compute Engine. -toc: true -toc_not_nested: true ---- - - - -This page shows you how to manually deploy an insecure multi-node CockroachDB cluster on Google Cloud Platform's Compute Engine (GCE), using Google's TCP Proxy Load Balancing service to distribute client traffic. - -{{site.data.alerts.callout_danger}}If you plan to use CockroachDB in production, we strongly recommend using a secure cluster instead. Select Secure above for instructions.{{site.data.alerts.end}} - - -## Requirements - -You must have [SSH access](https://cloud.google.com/compute/docs/instances/connecting-to-instance) to each machine with root or sudo privileges. This is necessary for distributing binaries and starting CockroachDB. 
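Because SSH with root or sudo privileges is the only local requirement, it can save time to verify it before deploying. A minimal sketch, using hypothetical `<username>` and address placeholders:

~~~ shell
# Expect "root" as the output if sudo access is set up correctly.
$ ssh <username>@<instance external IP address> "sudo whoami"
~~~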
- -## Recommendations - -- If you plan to use CockroachDB in production, we recommend using a [secure cluster](deploy-cockroachdb-on-google-cloud-platform.html) instead. Using an insecure cluster comes with risks: - - Your cluster is open to any client that can access any node's IP addresses. - - Any user, even `root`, can log in without providing a password. - - Any user, connecting as `root`, can read or write any data in your cluster. - - There is no network encryption or authentication, and thus no confidentiality. - -- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). - -- Decide how you want to access your Admin UI: - - Only from specific IP addresses, which requires you to set firewall rules to allow communication on port `8080` *(documented on this page)*. - - Using an SSH tunnel, which requires you to use `--http-host=localhost` when starting your nodes. - -## Step 1. Configure your network - -CockroachDB requires TCP communication on two ports: - -- **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster) -- **8080** (`tcp:8080`) for exposing your Admin UI - -Inter-node communication works by default using your GCE instances' internal IP addresses, which allow communication with other instances on CockroachDB's default port `26257`. However, to expose your admin UI and allow traffic from the TCP proxy load balancer and health checker to your instances, you need to [create firewall rules for your project](https://cloud.google.com/compute/docs/vpc/firewalls). - -### Creating Firewall Rules - -When creating firewall rules, we recommend using Google Cloud Platform's **tag** feature, which lets you specify that you want to apply the rule only to instance that include the same tag. - -#### Admin UI - -| Field | Recommended Value | -|-------|-------------------| -| Name | **cockroachadmin** | -| Source filter | IP ranges | -| Source IP ranges | Your local network's IP ranges | -| Allowed protocols... | **tcp:8080** | -| Target tags | **cockroachdb** | - -#### Application Data - -Applications will not connect directly to your CockroachDB nodes. Instead, they'll connect to GCE's TCP Proxy Load Balancing service, which automatically routes traffic to the instances that are closest to the user. Because this service is implemented at the edge of the Google Cloud, you'll need to create a firewall rule to allow traffic from the load balancer and health checker to your instances. This is covered in [Step 3](#step-3-set-up-tcp-proxy-load-balancing). - -{{site.data.alerts.callout_danger}}When using TCP Proxy Load Balancing, you cannot use firewall rules to control access to the load balancer. If you need such control, consider using Network TCP Load Balancing instead, but note that it cannot be used across regions. You might also consider using the HAProxy load balancer (see Manual Deployment for guidance).{{site.data.alerts.end}} - -## Step 2. Create instances - -[Create an instance](https://cloud.google.com/compute/docs/instances/create-start-instance) for each node you plan to have in your cluster. We [recommend](recommended-production-settings.html#cluster-topology): - -- Running at least 3 nodes to ensure survivability. -- Selecting the same continent for all of your instances for best performance. - -If you used a tag for your firewall rules, when you create the instance, select **Management, disk, networking, SSH keys**. 
Then on the **Networking** tab, in the **Network tags** field, enter **cockroachdb**. - -## Step 3. Set up TCP Proxy Load Balancing - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes. - -GCE offers fully-managed [TCP Proxy Load Balancing](https://cloud.google.com/load-balancing/docs/tcp/). This service lets you use a single IP address for all users around the world, automatically routing traffic to the instances that are closest to the user. - -{{site.data.alerts.callout_danger}}When using TCP Proxy Load Balancing, you cannot use firewall rules to control access to the load balancer. If you need such control, consider using Network TCP Load Balancing instead, but note that it cannot be used across regions. You might also consider using the HAProxy load balancer (see Manual Deployment for guidance).{{site.data.alerts.end}} - -To use GCE's TCP Proxy Load Balancing service: - -1. For each zone in which you're running an instance, [create a distinct instance group](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-unmanaged-instances). - - To ensure that the load balancer knows where to direct traffic, specify a port name mapping, with `tcp26257` as the **Port name** and `26257` as the **Port number**. -2. [Add the relevant instances to each instance group](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-unmanaged-instances#addinstances). -3. [Configure TCP Proxy Load Balancing](https://cloud.google.com/load-balancing/docs/tcp/setting-up-tcp#configure_load_balancer). - - During backend configuration, create a health check, setting the **Protocol** to `HTTP`, the **Port** to `8080`, and the **Request path** to `/health`. If you want to maintain long-lived SQL connections that may be idle for more than tens of seconds, increase the backend timeout setting accordingly. - - During frontend configuration, reserve a static IP address and choose a port. Note this address/port combination, as you'll use it for all of you client connections. -4. [Create a firewall rule](https://cloud.google.com/load-balancing/docs/tcp/setting-up-tcp#config-hc-firewall) to allow traffic from the load balancer and health checker to your instances. This is necessary because TCP Proxy Load Balancing is implemented at the edge of the Google Cloud. - - Be sure to set **Source IP ranges** to `130.211.0.0/22` and `35.191.0.0/16` and set **Target tags** to `cockroachdb` (not to the value specified in the linked instructions). - -## Step 4. Start the first node - -1. SSH to your instance: - - ~~~ shell - $ ssh @ - ~~~ - -2. Install the latest CockroachDB binary: - - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{page.release_info.version}}.linux-amd64.tgz - - # Extract the binary. - $ tar -xzf cockroach-{{page.release_info.version}}.linux-amd64.tgz \ - --strip=1 cockroach-{{page.release_info.version}}.linux-amd64/cockroach - - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. 
Start a new CockroachDB cluster with a single node: - - ~~~ shell - $ cockroach start --insecure \ - --background - ~~~ - -## Step 5. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by setting up additional nodes that will join the cluster. - -1. SSH to your instance: - - ~~~ - $ ssh @ - ~~~ - -2. Install CockroachDB from our latest binary: - - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{page.release_info.version}}.linux-amd64.tgz - - # Extract the binary. - $ tar -xzf cockroach-{{page.release_info.version}}.linux-amd64.tgz \ - --strip=1 cockroach-{{page.release_info.version}}.linux-amd64/cockroach - - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's internal IP address: - - ~~~ shell - $ cockroach start --insecure \ - --background \ - --join=:26257 - ~~~ - -4. Repeat these steps for each instance you want to use as a node. - -## Step 6. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. SSH to your first node: - - ~~~ shell - $ ssh @ - ~~~ - -2. Launch the built-in SQL client and create a database: - - ~~~ shell - $ cockroach sql --insecure - ~~~ - ~~~ sql - > CREATE DATABASE insecurenodetest; - ~~~ - -3. In another terminal window, SSH to another node: - - ~~~ shell - $ ssh @ - ~~~ - -4. Launch the built-in SQL client: - - ~~~ shell - $ cockroach sql --insecure - ~~~ - -5. View the cluster's databases, which will include `insecurenodetest`: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 7. Test load balancing - -The GCE load balancer created in [step 3](#step-3-set-up-tcp-proxy-load-balancing) can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients connect to the load balancer, which will then redirect the connection to a CockroachDB node. - -To test this, install CockroachDB locally and use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if it's not there already. - -2. Launch the built-in SQL client, with the `--host` flag set to the load balancer's IP address: - - ~~~ shell - $ cockroach sql --insecure \ - --host= \ - --port= - ~~~ - -3. View the cluster's databases: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, the load balancer redirected the query to one of the CockroachDB nodes. - -4. Check which node you were redirected to: - - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - ~~~ - +---------+ - | node_id | - +---------+ - | 1 | - +---------+ - (1 row) - ~~~ - -5. 
Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Monitor the cluster - -View your cluster's Admin UI by going to `http://:8080`. - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - -2. Click the **Databases** tab on the left to verify that `insecurenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## Step 9. Use the database - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the GCE load balancer, not to a CockroachDB node. - -## See Also - -- [Digital Ocean Deployment](deploy-cockroachdb-on-digital-ocean.html) -- [AWS Deployment](deploy-cockroachdb-on-aws.html) -- [Azure Deployment](deploy-cockroachdb-on-microsoft-azure.html) -- [Manual Deployment](manual-deployment.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/deploy-cockroachdb-on-google-cloud-platform.md b/src/current/v1.0/deploy-cockroachdb-on-google-cloud-platform.md deleted file mode 100644 index ef883174be6..00000000000 --- a/src/current/v1.0/deploy-cockroachdb-on-google-cloud-platform.md +++ /dev/null @@ -1,447 +0,0 @@ ---- -title: Deploy CockroachDB on Google Cloud Platform GCE -summary: Learn how to deploy CockroachDB on Google Cloud Platform's Compute Engine. -toc: true -toc_not_nested: true ---- - -
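As a quick complement to the Admin UI check in the monitoring step of the insecure GCE guide above, the same `/health` endpoint that the load balancer's health check polls can be probed by hand; the node address is a placeholder:

~~~ shell
# Any healthy node should respond on port 8080.
$ curl http://<node external IP address>:8080/health
~~~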
- -This page shows you how to manually deploy a secure multi-node CockroachDB cluster on Google Cloud Platform's Compute Engine (GCE), using Google's TCP Proxy Load Balancing service to distribute client traffic. - -If you are only testing CockroachDB, or you are not concerned with protecting network communication with TLS encryption, you can use an insecure cluster instead. Select **Insecure** above for instructions. - - -## Requirements - -- Locally, you must have [CockroachDB installed](install-cockroachdb.html), which you'll use to generate and manage your deployment's certificates. - -- In GCE, you must have [SSH access](https://cloud.google.com/compute/docs/instances/connecting-to-instance) to each machine with root or sudo privileges. This is necessary for distributing binaries and starting CockroachDB. - -## Recommendations - -- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). - -- Decide how you want to access your Admin UI: - - Only from specific IP addresses, which requires you to set firewall rules to allow communication on port `8080` *(documented on this page)*. - - Using an SSH tunnel, which requires you to use `--http-host=localhost` when starting your nodes. - -## Step 1. Configure your network - -CockroachDB requires TCP communication on two ports: - -- **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster) -- **8080** (`tcp:8080`) for exposing your Admin UI - -Inter-node communication works by default using your GCE instances' internal IP addresses, which allow communication with other instances on CockroachDB's default port `26257`. However, to expose your admin UI and allow traffic from the TCP proxy load balancer and health checker to your instances, you need to [create firewall rules for your project](https://cloud.google.com/compute/docs/vpc/firewalls). - -### Creating Firewall Rules - -When creating firewall rules, we recommend using Google Cloud Platform's **tag** feature, which lets you specify that you want to apply the rule only to instance that include the same tag. - -#### Admin UI - -| Field | Recommended Value | -|-------|-------------------| -| Name | **cockroachadmin** | -| Source filter | IP ranges | -| Source IP ranges | Your local network's IP ranges | -| Allowed protocols... | **tcp:8080** | -| Target tags | **cockroachdb** | - -#### Application Data - -Applications will not connect directly to your CockroachDB nodes. Instead, they'll connect to GCE's TCP Proxy Load Balancing service, which automatically routes traffic to the instances that are closest to the user. Because this service is implemented at the edge of the Google Cloud, you'll need to create a firewall rule to allow traffic from the load balancer and health checker to your instances. This is covered in [Step 3](#step-3-set-up-tcp-proxy-load-balancing). - -{{site.data.alerts.callout_danger}}When using TCP Proxy Load Balancing, you cannot use firewall rules to control access to the load balancer. If you need such control, consider using Network TCP Load Balancing instead, but note that it cannot be used across regions. You might also consider using the HAProxy load balancer (see Manual Deployment for guidance).{{site.data.alerts.end}} - -## Step 2. Create instances - -[Create an instance](https://cloud.google.com/compute/docs/instances/create-start-instance) for each node you plan to have in your cluster. 
We [recommend](recommended-production-settings.html#cluster-topology): - -- Running at least 3 nodes to ensure survivability. -- Selecting the same continent for all of your instances for best performance. - -If you used a tag for your firewall rules, when you create the instance, select **Management, disk, networking, SSH keys**. Then on the **Networking** tab, in the **Network tags** field, enter **cockroachdb**. - -## Step 3. Set up TCP Proxy Load Balancing - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes. - -GCE offers fully-managed [TCP Proxy Load Balancing](https://cloud.google.com/load-balancing/docs/tcp/). This service lets you use a single IP address for all users around the world, automatically routing traffic to the instances that are closest to the user. - -{{site.data.alerts.callout_danger}}When using TCP Proxy Load Balancing, you cannot use firewall rules to control access to the load balancer. If you need such control, consider using Network TCP Load Balancing instead, but note that it cannot be used across regions. You might also consider using the HAProxy load balancer (see Manual Deployment for guidance).{{site.data.alerts.end}} - -To use GCE's TCP Proxy Load Balancing service: - -1. For each zone in which you're running an instance, [create a distinct instance group](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-unmanaged-instances). - - To ensure that the load balancer knows where to direct traffic, specify a port name mapping, with `tcp26257` as the **Port name** and `26257` as the **Port number**. -2. [Add the relevant instances to each instance group](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-unmanaged-instances#addinstances). -3. [Configure TCP Proxy Load Balancing](https://cloud.google.com/load-balancing/docs/tcp/setting-up-tcp#configure_load_balancer). - - During backend configuration, create a health check, setting the **Protocol** to `HTTPS`, the **Port** to `8080`, and the **Request path** to `/health`. If you want to maintain long-lived SQL connections that may be idle for more than tens of seconds, increase the backend timeout setting accordingly. - - During frontend configuration, reserve a static IP address and note the IP address and the port you select. You'll use this address and port for all client connections. -4. [Create a firewall rule](https://cloud.google.com/load-balancing/docs/tcp/setting-up-tcp#config-hc-firewall) to allow traffic from the load balancer and health checker to your instances. This is necessary because TCP Proxy Load Balancing is implemented at the edge of the Google Cloud. - - Be sure to set **Source IP ranges** to `130.211.0.0/22` and `35.191.0.0/16` and set **Target tags** to `cockroachdb` (not to the value specified in the linked instructions). - -## Step 4. Generate certificates - -Locally, you'll need to [create the following certificates and keys](create-security-certificates.html): - -- A certificate authority (CA) key pair (`ca.crt` and `ca.key`). 
-- A node key pair for each node, issued to its IP addresses and any common names the machine uses, as well as to the IP address provisioned for the GCE load balancer. -- A client key pair for the `root` user. - -{{site.data.alerts.callout_success}}Before beginning, it's useful to collect each of your machine's internal and external IP addresses, as well as any server names you want to issue certificates for.{{site.data.alerts.end}} - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if you haven't already. - -2. Create two directories: - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir certs - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir my-safe-directory - ~~~ - - `certs`: You'll generate your CA certificate and all node and client certificates and keys in this directory and then upload some of the files to your nodes. - - `my-safe-directory`: You'll generate your CA key in this directory and then reference the key when generating node and client certificates. After that, you'll keep the key safe and secret; you will not upload it to your nodes. - -3. Create the CA certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-ca \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -4. Create the certificate and key for the first node, issued to all common names you might use to refer to the node as well as to addresses provisioned for the GCE load balancer: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - `` which is the instance's **Internal IP**. - - `` which is the instance's **External IP address**. - - `` which is the instance's **Name**. - - `` which include any domain names you point to the instance. - - `localhost` and `127.0.0.1` - - `` - - `` - -5. Upload certificates to the first node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -6. Delete the local copy of the node certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ rm certs/node.crt certs/node.key - ~~~ - - {{site.data.alerts.callout_info}}This is necessary because the certificates and keys for additional nodes will also be named node.crt and node.key As an alternative to deleting these files, you can run the next cockroach cert create-node commands with the --overwrite flag.{{site.data.alerts.end}} - -7. Create the certificate and key for the second node, issued to all common names you might use to refer to the node as well as to addresses provisioned for the GCE load balancer: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -8. Upload certificates to the second node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -9. Repeat steps 6 - 8 for each additional node. - -10. 
Create a client certificate and key for the `root` user: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-client \ - root \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {{site.data.alerts.callout_success}}In later steps, you'll use the root user's certificate to run cockroach client commands from your local machine. If you might also want to run cockroach client commands directly on a node (e.g., for local debugging), you'll need to copy the root user's certificate and key to that node as well.{{site.data.alerts.end}} - -## Step 5. Start the first node - -1. SSH to your instance: - - {% include copy-clipboard.html %} - ~~~ shell - $ ssh @ - ~~~ - -2. Install the latest CockroachDB binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new CockroachDB cluster with a single node, specifying the location of certificates and the address at which other nodes can reach it: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --background \ - --certs-dir=certs - ~~~ - -## Step 6. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by setting up additional nodes that will join the cluster. - -1. SSH to your instance: - - {% include copy-clipboard.html %} - ~~~ - $ ssh @ - ~~~ - -2. Install the latest CockroachDB binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's internal IP address: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --background \ - --certs-dir=certs \ - --join=:26257 - ~~~ - -4. Repeat these steps for each instance you want to use as a node. - -## Step 7. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, connect the built-in SQL client to node 1, with the `--host` flag set to the external address of node 1 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. Create a `securenodetest` database: - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE DATABASE securenodetest; - ~~~ - -3. 
Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -4. Connect the built-in SQL client to node 2, with the `--host` flag set to the external address of node 2 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -5. View the cluster's databases, which will include `securenodetest`: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Test load balancing - -The GCE load balancer created in [step 3](#step-3-set-up-tcp-proxy-load-balancing) can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients connect to the load balancer, which will then redirect the connection to a CockroachDB node. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, launch the built-in SQL client, with the `--host` flag set to the load balancer's IP address and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs - --host= \ - --port= - ~~~ - -2. View the cluster's databases: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, the load balancer redirected the query to one of the CockroachDB nodes. - -3. Check which node you were redirected to: - - {% include copy-clipboard.html %} - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - - ~~~ - +---------+ - | node_id | - +---------+ - | 1 | - +---------+ - (1 row) - ~~~ - -4. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 9. Monitor the cluster - -View your cluster's Admin UI by going to `https://:8080`. - -{{site.data.alerts.callout_info}}Note that your browser will consider the CockroachDB-created certificate invalid; you’ll need to click through a warning message to get to the UI.{{site.data.alerts.end}} - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - -2. Click the **Databases** tab on the left to verify that `securenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## Step 10. Use the database - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the GCE load balancer, not to a CockroachDB node. 
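As a rough illustration of the last step above, the following is a minimal sketch of a connection URL an application might use to reach the cluster through the GCE load balancer. The address, port, and database name are placeholders, and the TLS parameters are the standard PostgreSQL-driver settings pointing at the certificates generated in [Step 4](#step-4-generate-certificates); adjust them to match your driver and deployment.

{% include copy-clipboard.html %}
~~~ shell
# Hypothetical example: a libpq-style connection URL routed through the load balancer.
# <load balancer IP address> and <load balancer port> are the values reserved in Step 3.
$ export DATABASE_URL="postgresql://root@<load balancer IP address>:<load balancer port>/securenodetest?sslmode=verify-full&sslrootcert=certs/ca.crt&sslcert=certs/client.root.crt&sslkey=certs/client.root.key"
~~~

Connecting through the load balancer rather than a specific node keeps client connections available if an individual node goes down.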
- -## See Also - -- [Digital Ocean Deployment](deploy-cockroachdb-on-digital-ocean.html) -- [AWS Deployment](deploy-cockroachdb-on-aws.html) -- [Azure Deployment](deploy-cockroachdb-on-microsoft-azure.html) -- [Manual Deployment](manual-deployment.html) -- [Orchestration](orchestration.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/deploy-cockroachdb-on-microsoft-azure-insecure.md b/src/current/v1.0/deploy-cockroachdb-on-microsoft-azure-insecure.md deleted file mode 100644 index 6073db2bc87..00000000000 --- a/src/current/v1.0/deploy-cockroachdb-on-microsoft-azure-insecure.md +++ /dev/null @@ -1,306 +0,0 @@ ---- -title: Deploy CockroachDB on Microsoft Azure (Insecure) -summary: Learn how to deploy CockroachDB on Microsoft Azure. -toc: true -toc_not_nested: true ---- - - - -This page shows you how to manually deploy an insecure multi-node CockroachDB cluster on Microsoft Azure, using Azure's managed load balancing service to distribute client traffic. - -{{site.data.alerts.callout_danger}}If you plan to use CockroachDB in production, we strongly recommend using a secure cluster instead. Select Secure above for instructions.{{site.data.alerts.end}} - - -## Requirements - -You must have SSH access to each machine with root or sudo privileges. This is necessary for distributing binaries and starting CockroachDB. - -## Recommendations - -- If you plan to use CockroachDB in production, we recommend using a [secure cluster](deploy-cockroachdb-on-microsoft-azure.html) instead. Using an insecure cluster comes with risks: - - Your cluster is open to any client that can access any node's IP addresses. - - Any user, even `root`, can log in without providing a password. - - Any user, connecting as `root`, can read or write any data in your cluster. - - There is no network encryption or authentication, and thus no confidentiality. - -- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). - -- Decide how you want to access your Admin UI: - - Only from specific IP addresses, which requires you to set firewall rules to allow communication on port `8080` *(documented on this page)*. - - Using an SSH tunnel, which requires you to use `--http-host=localhost` when starting your nodes. - -## Step 1. Configure your network - -CockroachDB requires TCP communication on two ports: - -- **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster), for applications to connect to the load balancer, and for routing from the load balancer to nodes -- **8080** (`tcp:8080`) for exposing your Admin UI - -To enable this in Azure, you must create a Resource Group, Virtual Network, and Network Security Group. - -1. [Create a Resource Group](https://azure.microsoft.com/en-us/updates/create-empty-resource-groups/). - -2. [Create a Virtual Network](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-create-vnet-arm-pportal) that uses your **Resource Group**. - -3. 
[Create a Network Security Group](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-create-nsg-arm-pportal) that uses your **Resource Group**, and then add the following **inbound** rules to it: - - **Admin UI support**: - - | Field | Recommended Value | - |-------|-------------------| - | Name | **cockroachadmin** | - | Source | **IP Addresses** | - | Source IP addresses/CIDR ranges | Your local network’s IP ranges | - | Source port ranges | * | - | Destination | **Any** | - | Destination port range | **8080** | - | Protocol | **TCP** | - | Action | **Allow** | - | Priority | Any value > 1000 | - - **Application support**: - - {{site.data.alerts.callout_success}}If your application is also hosted on the same Azure Virtual Network, you will not need to create a firewall rule for your application to communicate with your load balancer.{{site.data.alerts.end}} - - | Field | Recommended Value | - |-------|-------------------| - | Name | **cockroachapp** | - | Source | **IP Addresses** | - | Source IP addresses/CIDR ranges | Your local network’s IP ranges | - | Source port ranges | * | - | Destination | **Any** | - | Destination port range | **26257** | - | Protocol | **TCP** | - | Action | **Allow** | - | Priority | Any value > 1000 | - -## Step 2. Create VMs - -[Create Linux VMs](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/quick-create-portal) for each node you plan to have in your cluster. We [recommend](recommended-production-settings.html#cluster-topology): - -- Running at least 3 nodes to ensure survivability. -- Selecting the same continent for all of your VMs for best performance. - -When creating the VMs, make sure to select the **Resource Group**, **Virtual Network**, and **Network Security Group** you created. - -## Step 3. Set up load balancing - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes. - -Microsoft Azure offers fully-managed load balancing to distribute traffic between instances. - -1. [Add Azure load balancing](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-overview). Be sure to: - - Set forwarding rules to route TCP traffic from the load balancer's port **26257** to port **26257** on the node Droplets. - - Configure health checks to use HTTP port **8080** and path `/health`. - -2. Note the provisioned **IP Address** for the load balancer. You'll use this later to test load balancing and to connect your application to the cluster. - -{{site.data.alerts.callout_info}}If you would prefer to use HAProxy instead of Azure's managed load balancing, see Manual Deployment for guidance.{{site.data.alerts.end}} - -## Step 4. Start the first node - -1. SSH to your VM: - - ~~~ shell - $ ssh @ - ~~~ - -2. Install the latest CockroachDB binary: - - ~~~ shell - # Get the latest CockroachDB tarball. - $ wget https://s3.amazonaws.com/binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary. 
- $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new CockroachDB cluster with a single node: - - ~~~ shell - $ cockroach start --insecure \ - --background \ - --advertise-host= - ~~~ - - {{site.data.alerts.callout_info}}You can find the VM's internal IP address listed in the Resource Group's Virtual Network.{{site.data.alerts.end}} - -## Step 5. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by setting up additional nodes that will join the cluster. - -1. SSH to your VM: - - ~~~ - $ ssh @ - ~~~ - -2. Install CockroachDB from our latest binary: - - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's internal IP address: - - ~~~ shell - $ cockroach start --insecure \ - --background \ - --advertise-host= \ - --join=:26257 - ~~~ - -4. Repeat these steps for each VM you want to use as a node. - -## Step 6. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - - -1. SSH to your first node: - - ~~~ shell - $ ssh @ - ~~~ - -2. Launch the built-in SQL client and create a database: - - ~~~ shell - $ cockroach sql --insecure - ~~~ - ~~~ sql - > CREATE DATABASE insecurenodetest; - ~~~ - -3. In another terminal window, SSH to another node: - - ~~~ shell - $ ssh @ - ~~~ - -4. Launch the built-in SQL client: - - ~~~ shell - $ cockroach sql --insecure - ~~~ - -5. View the cluster's databases, which will include `insecurenodetest`: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 7. Test load balancing - -The Azure load balancer created in [step 3](#step-3-set-up-load-balancing) can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients can connect to the load balancer, which will then redirect the connection to a CockroachDB node. - -To test this, install CockroachDB locally and use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if it's not there already. - -2. Launch the built-in SQL client, with the `--host` flag set to the load balancer's IP address: - - ~~~ shell - $ cockroach sql --insecure \ - --host= \ - --port=26257 - ~~~ - -3. 
View the cluster's databases: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, the load balancer redirected the query to one of the CockroachDB nodes. - -4. Check which node you were redirected to: - - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - ~~~ - +---------+ - | node_id | - +---------+ - | 3 | - +---------+ - (1 row) - ~~~ - -5. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Monitor the cluster - -View your cluster's Admin UI by going to `http://:8080`. - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - -2. Click the **Databases** tab on the left to verify that `insecurenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## Step 9. Use the database - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the Azure load balancer, not to a CockroachDB node. - -## See Also - -- [GCE Deployment](deploy-cockroachdb-on-google-cloud-platform.html) -- [Digital Ocean Deployment](deploy-cockroachdb-on-digital-ocean.html) -- [AWS Deployment](deploy-cockroachdb-on-aws.html) -- [Manual Deployment](manual-deployment.html) -- [Orchestration](orchestration.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/deploy-cockroachdb-on-microsoft-azure.md b/src/current/v1.0/deploy-cockroachdb-on-microsoft-azure.md deleted file mode 100644 index 1414f3ecb3b..00000000000 --- a/src/current/v1.0/deploy-cockroachdb-on-microsoft-azure.md +++ /dev/null @@ -1,452 +0,0 @@ ---- -title: Deploy CockroachDB on Microsoft Azure -summary: Learn how to deploy CockroachDB on Microsoft Azure. -toc: true -toc_not_nested: true ---- - -
- - -
- -This page shows you how to manually deploy a secure multi-node CockroachDB cluster on Microsoft Azure, using Azure's managed load balancing service to distribute client traffic. - -If you are only testing CockroachDB, or you are not concerned with protecting network communication with TLS encryption, you can use an insecure cluster instead. Select **Insecure** above for instructions. - - -## Requirements - -- Locally, you must have [CockroachDB installed](install-cockroachdb.html), which you'll use to generate and manage your deployment's certificates. - -- In Azure, you must have SSH access to each machine with root or sudo privileges. This is necessary for distributing binaries and starting CockroachDB. - -## Recommendations - -- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). - -- Decide how you want to access your Admin UI: - - Only from specific IP addresses, which requires you to set firewall rules to allow communication on port `8080` *(documented on this page)*. - - Using an SSH tunnel, which requires you to use `--http-host=localhost` when starting your nodes. - -## Step 1. Configure your network - -CockroachDB requires TCP communication on two ports: - -- **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster), for applications to connect to the load balancer, and for routing from the load balancer to nodes -- **8080** (`tcp:8080`) for exposing your Admin UI - -To enable this in Azure, you must create a Resource Group, Virtual Network, and Network Security Group. - -1. [Create a Resource Group](https://azure.microsoft.com/en-us/updates/create-empty-resource-groups/). -2. [Create a Virtual Network](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-create-vnet-arm-pportal) that uses your **Resource Group**. -3. [Create a Network Security Group](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-create-nsg-arm-pportal) that uses your **Resource Group**, and then add the following **inbound** rules to it: - - **Admin UI support**: - - | Field | Recommended Value | - |-------|-------------------| - | Name | **cockroachadmin** | - | Source | **IP Addresses** | - | Source IP addresses/CIDR ranges | Your local network’s IP ranges | - | Source port ranges | * | - | Destination | **Any** | - | Destination port range | **8080** | - | Protocol | **TCP** | - | Action | **Allow** | - | Priority | Any value > 1000 | - - **Application support**: - - {{site.data.alerts.callout_success}}If your application is also hosted on the same Azure Virtual Network, you will not need to create a firewall rule for your application to communicate with your load balancer.{{site.data.alerts.end}} - - | Field | Recommended Value | - |-------|-------------------| - | Name | **cockroachapp** | - | Source | **IP Addresses** | - | Source IP addresses/CIDR ranges | Your local network’s IP ranges | - | Source port ranges | * | - | Destination | **Any** | - | Destination port range | **26257** | - | Protocol | **TCP** | - | Action | **Allow** | - | Priority | Any value > 1000 | -## Step 2. Create VMs - -[Create Linux VMs](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/quick-create-portal) for each node you plan to have in your cluster. We [recommend](recommended-production-settings.html#cluster-topology): - -- Running at least 3 nodes to ensure survivability. 
-- Selecting the same continent for all of your VMs for best performance. - -When creating the VMs, make sure to select the **Resource Group**, **Virtual Network**, and **Network Security Group** you created. - -## Step 3. Set up load balancing - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes. - -Microsoft Azure offers fully-managed load balancing to distribute traffic between instances. - -1. [Add Azure load balancing](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-overview). Be sure to: - - Set forwarding rules to route TCP traffic from the load balancer's port **26257** to port **26257** on the node Droplets. - - Configure health checks to use HTTP port **8080** and path `/health`. - -2. Note the provisioned **IP Address** for the load balancer. You'll use this later to test load balancing and to connect your application to the cluster. - -{{site.data.alerts.callout_info}}If you would prefer to use HAProxy instead of Azure's managed load balancing, see Manual Deployment for guidance.{{site.data.alerts.end}} - -## Step 4. Generate certificates - -Locally, you'll need to [create the following certificates and keys](create-security-certificates.html): - -- A certificate authority (CA) key pair (`ca.crt` and `ca.key`). -- A node key pair for each node, issued to its IP addresses and any common names the machine uses, as well as to the IP address provisioned for the Azure load balancer. -- A client key pair for the `root` user. - -{{site.data.alerts.callout_success}}Before beginning, it's useful to collect each of your machine's internal and external IP addresses, as well as any server names you want to issue certificates for.{{site.data.alerts.end}} - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if you haven't already. - -2. Create two directories: - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir certs - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir my-safe-directory - ~~~ - - `certs`: You'll generate your CA certificate and all node and client certificates and keys in this directory and then upload some of the files to your nodes. - - `my-safe-directory`: You'll generate your CA key in this directory and then reference the key when generating node and client certificates. After that, you'll keep the key safe and secret; you will not upload it to your nodes. - -3. Create the CA certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-ca \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -4. Create the certificate and key for the first node, issued to all common names you might use to refer to the node as well as to addresses provisioned for the Azure load balancer: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - `` which is the VM's **Private IP address** (available on the VM's **Network Interface**). 
- - `` which is the VM's **Public IP address** (available on the VM's **Network Interface**). - - `` which is the VM's **Name**. - - `` which include any domain names you point to the instance. - - `localhost` and `127.0.0.1` - - `` - - `` - -5. Upload certificates to the first node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -6. Delete the local copy of the node certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ rm certs/node.crt certs/node.key - ~~~ - - {{site.data.alerts.callout_info}}This is necessary because the certificates and keys for additional nodes will also be named node.crt and node.key As an alternative to deleting these files, you can run the next cockroach cert create-node commands with the --overwrite flag.{{site.data.alerts.end}} - -7. Create the certificate and key for the second node, issued to all common names you might use to refer to the node as well as to addresses provisioned for the Azure load balancer: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -8. Upload certificates to the second node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -9. Repeat steps 6 - 8 for each additional node. - -10. Create a client certificate and key for the `root` user: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-client \ - root \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {{site.data.alerts.callout_success}}In later steps, you'll use the root user's certificate to run cockroach client commands from your local machine. If you might also want to run cockroach client commands directly on a node (e.g., for local debugging), you'll need to copy the root user's certificate and key to that node as well.{{site.data.alerts.end}} - -## Step 5. Start the first node - -1. SSH to your instance: - - {% include copy-clipboard.html %} - ~~~ shell - $ ssh @ - ~~~ - -2. Install the latest CockroachDB binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball. - $ wget https://s3.amazonaws.com/binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new CockroachDB cluster with a single node, specifying the location of certificates and the address at which other nodes can reach it: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start \ - --background \ - --certs-dir=certs \ - --advertise-host= - ~~~ - -## Step 6. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. 
Next, scale your cluster by setting up additional nodes that will join the cluster. - -1. SSH to your instance: - - {% include copy-clipboard.html %} - ~~~ - $ ssh @ - ~~~ - -2. Install the latest CockroachDB binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's internal IP address: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start \ - --background \ - --certs-dir=certs \ - --advertise-host= \ - --join=:26257 - ~~~ - -4. Repeat these steps for each instance you want to use as a node. - -## Step 7. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, launch the built-in SQL client with the `--host` flag set to the external address of node 1 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. Create a `securenodetest` database: - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE DATABASE securenodetest; - ~~~ - -3. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -4. Launch built-in SQL client with the `--host` flag set to the external address of node 2 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -5. View the cluster's databases, which will include `securenodetest`: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Test load balancing - -The Azure load balancer created in [step 3](#step-3-set-up-load-balancing) can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients can connect to the load balancer, which will then redirect the connection to a CockroachDB node. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, launch the built-in SQL client, with the `--host` flag set to the load balancer's IP address: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. 
View the cluster's databases: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, the load balancer redirected the query to one of the CockroachDB nodes. - -3. Check which node you were redirected to: - - {% include copy-clipboard.html %} - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - - ~~~ - +---------+ - | node_id | - +---------+ - | 3 | - +---------+ - (1 row) - ~~~ - -4. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 9. Monitor the cluster - -View your cluster's Admin UI by going to `https://:8080`. - -{{site.data.alerts.callout_info}}Note that your browser will consider the CockroachDB-created certificate invalid; you’ll need to click through a warning message to get to the UI.{{site.data.alerts.end}} - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - -2. Click the **Databases** tab on the left to verify that `securenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## Step 10. Use the database - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the Azure load balancer, not to a CockroachDB node. - -## See Also - -- [GCE Deployment](deploy-cockroachdb-on-google-cloud-platform.html) -- [Digital Ocean Deployment](deploy-cockroachdb-on-digital-ocean.html) -- [AWS Deployment](deploy-cockroachdb-on-aws.html) -- [Manual Deployment](manual-deployment.html) -- [Orchestration](orchestration.html) diff --git a/src/current/v1.0/diagnostics-reporting.md b/src/current/v1.0/diagnostics-reporting.md deleted file mode 100644 index de907f1e72c..00000000000 --- a/src/current/v1.0/diagnostics-reporting.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: Diagnostics Reporting -summary: Learn about the diagnostic details that get shared with CockroachDB and how to opt out of sharing. -toc: true ---- - -By default, each node of a CockroachDB cluster shares anonymous usage details with Cockroach Labs on an hourly basis. These details, which are completely scrubbed of identifiable information, greatly help us understand and improve how the system behaves in real-world scenarios. - -This page explains the details that get shared and how to opt out of sharing. - -{{site.data.alerts.callout_success}}For insights into your cluster's performance and health, use the built-in Admin UI or a third-party monitoring tool like Prometheus.{{site.data.alerts.end}} - - -## What Gets Shared - -When diagnostics reporting is on, each node of a CockroachDB cluster shares anonymized storage details, SQL table structure details, and SQL query statistics with Cockroach Labs on an hourly basis, as well as crash reports as they occur. Please note that the details that get shared may change over time, but as that happens, we will update this page and announce the changes in release notes. 
- -### Storage Details - -Each node of a CockroachDB cluster shares the following storage details on an hourly basis: - -Detail | Description --------|------------ -Node ID | The internal ID of the node. -Store ID | The internal ID of each store on the node. -Bytes | The amount of live data used by applications and the CockroachDB system on the node and per store. This excludes historical and deleted data. -Range Count | The number of ranges on the node and per store. -Key Count | The number of keys stored on the node and per store. - -#### Example - -This JSON example shows what storage details look like when sent to Cockroach Labs, in this case for a node with two stores. - -~~~ json -{ - "node":{ - "node_id":1, - "bytes":64828, - "key_count":138, - "range_count":12 - }, - "stores":[ - { - "node_id":1, - "store_id":1, - "bytes":64828, - "key_count":138, - "range_count":12 - }, - { - "node_id":1, - "store_id":2, - "bytes":0, - "key_count":0, - "range_count":0 - } - ] -} -~~~ - -### SQL Table Structure Details - -Each node of a CockroachDB cluster shares the following details about the structure of each table stored on the node on an hourly basis: - -{{site.data.alerts.callout_info}}No actual table data or table/column names are shared, just metadata about the structure of tables. All names and other string values are scrubbed and replaced with underscores.{{site.data.alerts.end}} - -Detail | Description --------|------------ -Table | Metadata about each table, such as its internal ID, when it was last modified, and how many times it has been renamed. Table names are replaced with underscores. -Column | Metadata about each column in a table, such as its internal ID and type. Column names are replaced with underscores. -Column Families | Metadata about [column families](column-families.html) in a table, such as its internal ID and the columns included in the family. Family and column names are replaced with underscores. -Indexes | Metadata about the primary index and any secondary indexes on the table, such as the internal ID of an index and the columns covered by an index. All index, column, and other strings are replaced with underscores. -Privileges | Metadata about user [privileges](privileges.html) on the table, such as the number of privileges granted to each user. Usernames are replaced with underscores. -Checks | Metadata about any [check constraints](check.html) on the table. Check constraint names and expressions are replaced with underscores. - -#### Example - -This JSON example shows an excerpt of what table structure details look like when sent to Cockroach Labs, in this case for a node with just one table. Note that all names and other strings have been scrubbed and replaced with underscores. - -~~~ json -{ - "schema":[ - { - "name":"_", - "id":51, - "parent_id":50, - "version":1, - "up_version":false, - "modification_time":{ - "wall_time":0, - "logical":0 - }, - "columns":[ - { - "name":"_", - "id":1, - "type":{ - "kind":1, - "width":0, - "precision":0 - }, - "nullable":true, - "default_expr":"_", - "hidden":false - }, - ... - ], - ... 
- } - ] -} -~~~ - -### SQL Query Statistics - -Each node of a CockroachDB cluster shares the following statistics about the SQL queries it has executed on an hourly basis: - -{{site.data.alerts.callout_info}}No query results are shared, just the queries themselves, with all names and other strings scrubbed and replaced with underscores, and statistics about the queries.{{site.data.alerts.end}} - -Detail | Description --------|------------ -Query | The query executed. Names and other strings are replaced with underscores. -Counts | The number of times the query was executed, the number of times the query was committed on the first attempt (without retries), and the maximum observed number of times the query was retried automatically. -Last Error | The last error the query encountered. -Rows | The number of rows returned or observed. -Latencies | The amount of time involved in various aspects of the query, for example, the time to parse the query, the time to plan the query, and the time to run the query and fetch/compute results. - -#### Example - -This JSON example shows an excerpt of what query statistics look like when sent to Cockroach Labs. Note that all names and other strings have been scrubbed from the queries and replaced with underscores. - -~~~ json -{ - "sqlstats": { - "-3750763034362895579": { - "CREATE DATABASE _": { - "count": 1, - "first_attempt_count": 1, - "max_retries": 0, - "last_err": "", - "num_rows": { - "mean": 0, - "squared_diffs": 0 - }, - "parse_lat": { - "mean": 0.00010897, - "squared_diffs": 0 - }, - "plan_lat": { - "mean": 0.000011004, - "squared_diffs": 0 - }, - "run_lat": { - "mean": 0.002049073, - "squared_diffs": 0 - }, - "service_lat": { - "mean": 0.00220478, - "squared_diffs": 0 - }, - "overhead_lat": { - "mean": 0.0000357329999999996, - "squared_diffs": 0 - } - }, - "INSERT INTO _ VALUES (_)": { - "count": 10, - "first_attempt_count": 10, - "max_retries": 0, - "last_err": "", - "num_rows": { - "mean": 2, - "squared_diffs": 0 - }, - "parse_lat": { - "mean": 0.000021831200000000002, - "squared_diffs": 5.024879776000002e-10 - }, - "plan_lat": { - "mean": 0.00007221249999999999, - "squared_diffs": 7.744142312499998e-9 - }, - "run_lat": { - "mean": 0.0003641647, - "squared_diffs": 1.0141981141410002e-7 - }, - "service_lat": { - "mean": 0.00048527110000000004, - "squared_diffs": 2.195025173849e-7 - }, - "overhead_lat": { - "mean": 0.00002706270000000002, - "squared_diffs": 2.347266118100001e-9 - } - }, - ... - } - } -} -~~~ - -## Opt Out of Diagnostics Reporting - -### At Cluster Initialization - -To make sure that absolutely no diagnostic details are shared, you can set the environment variable `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING=true` before starting the first node of the cluster. Note that this works only when set before starting the first node of the cluster. Once the cluster is running, you need to use the `SET` method described below. - -### After Cluster Initialization - -To stop sending diagnostic details to Cockroach Labs once a cluster is running, [use the built-in SQL client](use-the-built-in-sql-client.html) to execute the following [`SET CLUSTER SETTING`](set-cluster-setting.html) statement, which switches the `diagnostics.reporting.enabled` [cluster setting](cluster-settings.html) to `false`: - -~~~ sql -> SET CLUSTER SETTING diagnostics.reporting.enabled = false; -~~~ - -This change will not be instantaneous, as it must be propagated to other nodes in the cluster. 
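To make the cluster-initialization opt-out above concrete, here is a minimal sketch of starting the very first node with the environment variable set. The `--insecure` and `--background` flags are only illustrative, borrowed from the insecure deployment examples; a secure deployment would pass `--certs-dir` and its other usual flags instead.

~~~ shell
# A minimal sketch of the cluster-initialization opt-out: set the variable
# only when starting the *first* node of the cluster. The start flags shown
# here are placeholders; use the flags appropriate to your deployment.
$ COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING=true cockroach start \
--insecure \
--background
~~~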
- -## Check the State of Diagnostics Reporting - -To check the state of diagnostics reporting, [use the built-in SQL client](use-the-built-in-sql-client.html) to execute the following [`SHOW CLUSTER SETTING`](show-cluster-setting.html) statement: - -~~~ sql -> SHOW CLUSTER SETTING diagnostics.reporting.enabled; -~~~ - -~~~ -+-------------------------------+ -| diagnostics.reporting.enabled | -+-------------------------------+ -| false | -+-------------------------------+ -(1 row) -~~~ - -If the setting is `false`, diagnostics reporting is off; if the setting is `true`, diagnostics reporting is on. - -## See Also - -- [Cluster Settings](cluster-settings.html) -- [Start a Node](start-a-node.html) diff --git a/src/current/v1.0/distributed-transactions.md b/src/current/v1.0/distributed-transactions.md deleted file mode 100644 index 6d282aedd3d..00000000000 --- a/src/current/v1.0/distributed-transactions.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Distributed Transactions -summary: CockroachDB implements efficient, fully-serializable distributed transactions. -toc: false ---- - -CockroachDB distributes [transactions](transactions.html) across your cluster, whether it’s a few servers in a single location or many servers across multiple datacenters. Unlike with sharded setups, you don’t need to know the precise location of data; you just talk to any node in your cluster and CockroachDB gets your transaction to the right place seamlessly. Distributed transactions proceed without downtime or additional latency while rebalancing is underway. You can even move tables – or entire databases – between data centers or cloud infrastructure providers while the cluster is under load. - -- Easily build consistent applications -- Optimistic concurrency with distributed deadlock detection -- Serializable default isolation level - -Distributed transactions in CockroachDB - -## See Also - -- [How CockroachDB Does Distributed, Atomic Transactions](https://www.cockroachlabs.com/blog/how-cockroachdb-distributes-atomic-transactions/) -- [Serializable, Lockless, Distributed: Isolation in CockroachDB](https://www.cockroachlabs.com/blog/serializable-lockless-distributed-isolation-cockroachdb/) diff --git a/src/current/v1.0/drop-column.md b/src/current/v1.0/drop-column.md deleted file mode 100644 index 94fbdab3bf0..00000000000 --- a/src/current/v1.0/drop-column.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: DROP COLUMN -summary: Use the ALTER COLUMN statement to remove columns from tables. -toc: true ---- - -The `DROP COLUMN` [statement](sql-statements.html) is part of `ALTER TABLE` and removes columns from a table. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/drop_column.html %} - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the table. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table with the column you want to drop. | -| `name` | The name of the column you want to drop. | -| `CASCADE` | Drop the column even if objects (such as [views](views.html)) depend on it; drop the dependent objects, as well.

`CASCADE` does not list the objects it drops, so it should be used cautiously.

However, `CASCADE` will not drop dependent indexes; you must use [`DROP INDEX`](drop-index.html). This also prevents `CASCADE` from dropping columns with Foreign Key constraints. | -| `RESTRICT` | *(Default)* Do not drop the column if any objects (such as [views](views.html)) depend on it. | - -## Examples - -### Drop Columns - -If you no longer want a column in a table, you can drop it. - -``` sql -> ALTER TABLE orders DROP COLUMN billing_zip; -``` - -### Prevent Dropping Columns with Dependent Objects (`RESTRICT`) - -If the column has dependent objects, such as [views](views.html), CockroachDB will not drop the column by default; however, if you want to be sure of the behavior you can include the `RESTRICT` clause. - -``` sql -> ALTER TABLE orders DROP COLUMN customer RESTRICT; -``` -``` -pq: cannot drop column "customer" because view "customer_view" depends on it -``` - -### Drop Column & Dependent Objects (`CASCADE`) - -If you want to drop the column and all of its dependent options, include the `CASCADE` clause. - -{{site.data.alerts.callout_danger}}CASCADE does not list objects it drops, so should be used cautiously.{{site.data.alerts.end}} - -``` sql -> SHOW CREATE VIEW customer_view; -``` -``` -+---------------+----------------------------------------------------------------+ -| View | CreateView | -+---------------+----------------------------------------------------------------+ -| customer_view | CREATE VIEW customer_view AS SELECT customer FROM store.orders | -+---------------+----------------------------------------------------------------+ -``` -``` sql -> ALTER TABLE orders DROP COLUMN customer CASCADE; -> SHOW CREATE VIEW customer_view; -``` -``` -pq: view "customer_view" does not exist -``` - -## See Also - -- [`DROP CONSTRAINT`](drop-constraint.html) -- [`DROP INDEX`](drop-index.html) -- [`ALTER TABLE`](alter-table.html) diff --git a/src/current/v1.0/drop-constraint.md b/src/current/v1.0/drop-constraint.md deleted file mode 100644 index 75f40e83215..00000000000 --- a/src/current/v1.0/drop-constraint.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: DROP CONSTRAINT -summary: Use the ALTER CONSTRAINT statement to remove constraints from columns. -toc: true ---- - -The `DROP CONSTRAINT` [statement](sql-statements.html) is part of `ALTER TABLE` and removes Check and Foreign Key constraints from columns. - -{{site.data.alerts.callout_info}}For information about removing other constraints, see Constraints: Remove Constraints.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/drop_constraint.html %} - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the table. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table with the constraint you want to drop. | -| `name` | The name of the constraint you want to drop. 
| - -## Example - -~~~ sql -> SHOW CONSTRAINTS FROM orders; -~~~ -~~~ -+--------+---------------------------+-------------+-----------+----------------+ -| Table | Name | Type | Column(s) | Details | -+--------+---------------------------+-------------+-----------+----------------+ -| orders | fk_customer_ref_customers | FOREIGN KEY | customer | customers.[id] | -| orders | primary | PRIMARY KEY | id | NULL | -+--------+---------------------------+-------------+-----------+----------------+ -~~~ -~~~ sql -> ALTER TABLE orders DROP CONSTRAINT fk_customer_ref_customers; -~~~ -~~~ -ALTER TABLE -~~~ -~~~ sql -> SHOW CONSTRAINTS FROM orders; -~~~ -~~~ -+--------+---------+-------------+-----------+---------+ -| Table | Name | Type | Column(s) | Details | -+--------+---------+-------------+-----------+---------+ -| orders | primary | PRIMARY KEY | id | NULL | -+--------+---------+-------------+-----------+---------+ -~~~ - -{{site.data.alerts.callout_info}}You cannot drop the primary constraint, which indicates your table's Primary Key.{{site.data.alerts.end}} - -## See Also - -- [`DROP COLUMN`](drop-column.html) -- [`DROP INDEX`](drop-index.html) -- [`ALTER TABLE`](alter-table.html) diff --git a/src/current/v1.0/drop-database.md b/src/current/v1.0/drop-database.md deleted file mode 100644 index a3d204f51a7..00000000000 --- a/src/current/v1.0/drop-database.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: DROP DATABASE -summary: The DROP DATABASE statement removes a database and all its objects from a CockroachDB cluster. -toc: true ---- - -The `DROP DATABASE` [statement](sql-statements.html) removes a database and all its objects from a CockroachDB cluster. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/drop_database.html %} - -## Required Privileges - -The user must have the `DROP` [privilege](privileges.html) on the database and on all tables in the database. - -## Parameters - -Parameter | Description -----------|------------ -`IF EXISTS` | Drop the database if it exists; if it does not exist, do not return an error. -`name` | The name of the database you want to drop. - - -## Examples - -~~~ sql -> SHOW DATABASES; -~~~ -~~~ -+----------+ -| Database | -+----------+ -| db1 | -| system | -+----------+ -~~~ -~~~ sql -> DROP DATABASE db1; - -> DROP DATABASE db2; -~~~ -~~~ -pq: database "db2" does not exist -~~~ - -To avoid an error in case the database does not exist, you can include `IF EXISTS`: - -~~~ sql -> DROP DATABASE IF EXISTS db2; - -> SHOW DATABASES; -~~~ -~~~ -+----------+ -| Database | -+----------+ -| system | -+----------+ -~~~ - -{{site.data.alerts.callout_danger}}DROP DATABASE drops all tables within the database as well as objects dependent on the tables without listing the tables or the dependent objects. This can lead to inadvertent and difficult-to-recover losses. To avoid potential harm, we recommend dropping objects individually in most cases.{{site.data.alerts.end}} - - -## See Also - -- [`CREATE DATABASE`](create-database.html) -- [`SHOW DATABASES`](show-databases.html) -- [`RENAME DATABASE`](rename-database.html) -- [`SET DATABASE`](set-vars.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/drop-index.md b/src/current/v1.0/drop-index.md deleted file mode 100644 index 2f4b6cd6d94..00000000000 --- a/src/current/v1.0/drop-index.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: DROP INDEX -summary: The DROP INDEX statement removes indexes from tables. 
-toc: true ---- - -The `DROP INDEX` [statement](sql-statements.html) removes indexes from tables. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/drop_index.html %} - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on each specified table. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `IF EXISTS` | Drop the named indexes if they exist; if they do not exist, do not return an error.| -| `table_name` | The name of the table with the index you want to drop. Find table names with [`SHOW TABLES`](show-tables.html).| -| `index_name` | The name of the index you want to drop. Find index names with [`SHOW INDEX`](show-index.html).

You cannot drop a table's `primary` index.| -| `CASCADE` | Drop all objects (such as [constraints](constraints.html)) that depend on the indexes.

`CASCADE` does not list objects it drops, so should be used cautiously.| -| `RESTRICT` | _(Default)_ Do not drop the indexes if any objects (such as [constraints](constraints.html)) depend on them.| - -## Examples - -### Remove an Index (No Dependencies) -~~~ sql -> SHOW INDEX FROM tbl; -~~~ -~~~ -+-------+--------------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+-------+--------------+--------+-----+--------+-----------+---------+----------+ -| tbl | primary | true | 1 | id | ASC | false | false | -| tbl | tbl_name_idx | false | 1 | name | ASC | false | false | -| tbl | tbl_name_idx | false | 2 | id | ASC | false | true | -+-------+--------------+--------+-----+--------+-----------+---------+----------+ -(3 rows) -~~~ -~~~ sql -> DROP INDEX tbl@tbl_name_idx; - -> SHOW INDEX FROM tbl; -~~~ -~~~ -+-------+---------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+-------+---------+--------+-----+--------+-----------+---------+----------+ -| tbl | primary | true | 1 | id | ASC | false | false | -+-------+---------+--------+-----+--------+-----------+---------+----------+ -(1 row) -~~~ - -### Remove an Index and Dependent Objects with `CASCADE` - -{{site.data.alerts.callout_danger}}CASCADE drops all dependent objects without listing them, which can lead to inadvertent and difficult-to-recover losses. To avoid potential harm, we recommend dropping objects individually in most cases.{{site.data.alerts.end}} - -~~~ sql -> SHOW INDEX FROM orders; -~~~ -~~~ -+--------+---------------------+--------+-----+----------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+--------+---------------------+--------+-----+----------+-----------+---------+----------+ -| orders | primary | true | 1 | id | ASC | false | false | -| orders | orders_customer_idx | false | 1 | customer | ASC | false | false | -| orders | orders_customer_idx | false | 2 | id | ASC | false | true | -+--------+---------------------+--------+-----+----------+-----------+---------+----------+ -(3 rows) -~~~ -~~~ sql -> DROP INDEX orders@orders_customer_idx; -~~~ -~~~ -pq: index "orders_customer_idx" is in use as a foreign key constraint -~~~ -~~~ sql -> SHOW CONSTRAINTS FROM orders; -~~~ -~~~ -+--------+---------------------------+-------------+------------+----------------+ -| Table | Name | Type | Column(s) | Details | -+--------+---------------------------+-------------+------------+----------------+ -| orders | fk_customer_ref_customers | FOREIGN KEY | [customer] | customers.[id] | -| orders | primary | PRIMARY KEY | [id] | NULL | -+--------+---------------------------+-------------+------------+----------------+ -~~~ -~~~ sql -> DROP INDEX orders@orders_customer_idx CASCADE; - -> SHOW CONSTRAINTS FROM orders; -~~~ -~~~ -+--------+---------+-------------+-----------+---------+ -| Table | Name | Type | Column(s) | Details | -+--------+---------+-------------+-----------+---------+ -| orders | primary | PRIMARY KEY | [id] | NULL | -+--------+---------+-------------+-----------+---------+ -~~~ diff --git a/src/current/v1.0/drop-table.md b/src/current/v1.0/drop-table.md deleted file mode 100644 index 21002cc5844..00000000000 --- a/src/current/v1.0/drop-table.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: DROP TABLE -summary: The DROP TABLE statement removes a table and all its indexes from a database. 
-toc: true ---- - -The `DROP TABLE` [statement](sql-statements.html) removes a table and all its indexes from a database. - - -## Required Privileges - -The user must have the `DROP` [privilege](privileges.html) on the specified table(s). If `CASCADE` is used, the user must have the privileges required to drop each dependent object as well. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/drop_table.html %} - -## Parameters - -Parameter | Description -----------|------------ -`IF EXISTS` | Drop the table if it exists; if it does not exist, do not return an error. -`table_name` | A comma-separated list of table names. To find table names, use [`SHOW TABLES`](show-tables.html). -`CASCADE` | Drop all objects (such as [constraints](constraints.html) and [views](views.html)) that depend on the table.

`CASCADE` does not list objects it drops, so should be used cautiously. -`RESTRICT` | _(Default)_ Do not drop the table if any objects (such as [constraints](constraints.html) and [views](views.html)) depend on it. - -## Examples - -### Remove a Table (No Dependencies) - -In this example, other objects do not depend on the table being dropped. - -~~~ sql -> SHOW TABLES FROM bank; -~~~ - -~~~ -+--------------------+ -| Table | -+--------------------+ -| accounts | -| branches | -| user_accounts_view | -+--------------------+ -(3 rows) -~~~ - -~~~ sql -> DROP TABLE bank.branches; -~~~ - -~~~ -DROP TABLE -~~~ - -~~~ sql -> SHOW TABLES FROM bank; -~~~ - -~~~ -+--------------------+ -| Table | -+--------------------+ -| accounts | -| user_accounts_view | -+--------------------+ -(2 rows) -~~~ - -### Remove a Table and Dependent Objects with `CASCADE` - -In this example, a view depends on the table being dropped. Therefore, it's only possible to drop the table while simultaneously dropping the dependent view using `CASCADE`. - -{{site.data.alerts.callout_danger}}CASCADE drops all dependent objects without listing them, which can lead to inadvertent and difficult-to-recover losses. To avoid potential harm, we recommend dropping objects individually in most cases.{{site.data.alerts.end}} - -~~~ sql -> SHOW TABLES FROM bank; -~~~ - -~~~ -+--------------------+ -| Table | -+--------------------+ -| accounts | -| user_accounts_view | -+--------------------+ -(2 rows) -~~~ - -~~~ sql -> DROP TABLE bank.accounts; -~~~ - -~~~ -pq: cannot drop table "accounts" because view "user_accounts_view" depends on it -~~~ - -~~~sql -> DROP TABLE bank.accounts CASCADE; -~~~ - -~~~ -DROP TABLE -~~~ - -~~~ sql -> SHOW TABLES FROM bank; -~~~ - -~~~ -+-------+ -| Table | -+-------+ -+-------+ -(0 rows) -~~~ - -## See Also - -- [`ALTER TABLE`](alter-table.html) -- [`CREATE TABLE`](create-table.html) -- [`INSERT`](insert.html) -- [`RENAME TABLE`](rename-table.html) -- [`SHOW COLUMNS`](show-columns.html) -- [`SHOW TABLES`](show-tables.html) -- [`UPDATE`](update.html) -- [`DELETE`](delete.html) -- [`DROP INDEX`](drop-index.html) -- [`DROP VIEW`](drop-view.html) diff --git a/src/current/v1.0/drop-view.md b/src/current/v1.0/drop-view.md deleted file mode 100644 index e3d4a54637b..00000000000 --- a/src/current/v1.0/drop-view.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: DROP VIEW -summary: The DROP VIEW statement removes a view from a database. -toc: true ---- - -The `DROP VIEW` [statement](sql-statements.html) removes a [view](views.html) from a database. - - -## Required Privileges - -The user must have the `DROP` [privilege](privileges.html) on the specified view(s). If `CASCADE` is used to drop dependent views, the user must have the `DROP` privilege on each dependent view as well. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/drop_view.html %} - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `IF EXISTS` | Drop the view if it exists; if it does not exist, do not return an error.| -| `view_name` | A comma-separated list of view names. To find view names, use:

`SELECT * FROM information_schema.tables WHERE table_type = 'VIEW';`| -| `CASCADE` | Drop other views that depend on the view being dropped.

`CASCADE` does not list views it drops, so should be used cautiously.| -| `RESTRICT` | _(Default)_ Do not drop the view if other views depend on it.| - -## Examples - -### Remove a View (No Dependencies) - -In this example, other views do not depend on the view being dropped. - -~~~ sql -> SELECT * FROM information_schema.tables WHERE table_type = 'VIEW'; -~~~ - -~~~ -+---------------+-------------------+--------------------+------------+---------+ -| TABLE_CATALOG | TABLE_SCHEMA | TABLE_NAME | TABLE_TYPE | VERSION | -+---------------+-------------------+--------------------+------------+---------+ -| def | bank | user_accounts | VIEW | 1 | -| def | bank | user_emails | VIEW | 1 | -+---------------+-------------------+--------------------+------------+---------+ -(2 rows) -~~~ - -~~~ sql -> DROP VIEW bank.user_emails; -~~~ - -~~~ -DROP VIEW -~~~ - -~~~ sql -> SELECT * FROM information_schema.tables WHERE table_type = 'VIEW'; -~~~ - -~~~ -+---------------+-------------------+--------------------+------------+---------+ -| TABLE_CATALOG | TABLE_SCHEMA | TABLE_NAME | TABLE_TYPE | VERSION | -+---------------+-------------------+--------------------+------------+---------+ -| def | bank | user_accounts | VIEW | 1 | -+---------------+-------------------+--------------------+------------+---------+ -(1 row) -~~~ - -### Remove a View (With Dependencies) - -In this example, another view depends on the view being dropped. Therefore, it's only possible to drop the view while simultaneously dropping the dependent view using `CASCADE`. - -{{site.data.alerts.callout_danger}}CASCADE drops all dependent views without listing them, which can lead to inadvertent and difficult-to-recover losses. To avoid potential harm, we recommend dropping objects individually in most cases.{{site.data.alerts.end}} - -~~~ sql -> SELECT * FROM information_schema.tables WHERE table_type = 'VIEW'; -~~~ - -~~~ -+---------------+-------------------+--------------------+------------+---------+ -| TABLE_CATALOG | TABLE_SCHEMA | TABLE_NAME | TABLE_TYPE | VERSION | -+---------------+-------------------+--------------------+------------+---------+ -| def | bank | user_accounts | VIEW | 1 | -| def | bank | user_emails | VIEW | 1 | -+---------------+-------------------+--------------------+------------+---------+ -(2 rows) -~~~ - -~~~ sql -> DROP VIEW bank.user_accounts; -~~~ - -~~~ -pq: cannot drop view "user_accounts" because view "user_emails" depends on it -~~~ - -~~~sql -> DROP VIEW bank.user_accounts CASCADE; -~~~ - -~~~ -DROP VIEW -~~~ - -~~~ sql -> SELECT * FROM information_schema.tables WHERE table_type = 'VIEW'; -~~~ - -~~~ -+---------------+-------------------+--------------------+------------+---------+ -| TABLE_CATALOG | TABLE_SCHEMA | TABLE_NAME | TABLE_TYPE | VERSION | -+---------------+-------------------+--------------------+------------+---------+ -| def | bank | create_test | VIEW | 1 | -+---------------+-------------------+--------------------+------------+---------+ -(1 row) -~~~ - -## See Also - -- [Views](views.html) -- [`CREATE VIEW`](create-view.html) -- [`SHOW CREATE VIEW`](show-create-view.html) -- [`ALTER VIEW`](alter-view.html) diff --git a/src/current/v1.0/explain.md b/src/current/v1.0/explain.md deleted file mode 100644 index c653ffb5d0c..00000000000 --- a/src/current/v1.0/explain.md +++ /dev/null @@ -1,294 +0,0 @@ ---- -title: EXPLAIN -summary: The EXPLAIN statement provides information you can use to optimize SQL queries. 
-toc: true ---- - -The `EXPLAIN` [statement](sql-statements.html) returns CockroachDB's query plan for an [explainable statement](#explainable-statements). You can then use this information to optimize the query. - - -## Explainable Statements - -You can use `EXPLAIN` on the following statements: - -- [`ALTER TABLE`](alter-table.html) -- [`CREATE DATABASE`](create-database.html), [`CREATE INDEX`](create-index.html), [`CREATE TABLE`](create-table.html), [`CREATE TABLE AS`](create-table-as.html), [`CREATE USER`](create-user.html), [`CREATE VIEW`](create-view.html) -- [`DELETE`](delete.html) -- `EXPLAIN` -- [`INSERT`](insert.html) -- [`SELECT`](select.html) -- [`SHOW COLUMNS`](show-columns.html), [`SHOW CONSTRAINTS`](show-constraints.html), [`SHOW CREATE TABLE`](show-create-table.html), [`SHOW CREATE VIEW`](show-create-view.html), [`SHOW CLUSTER SETTING`](show-cluster-setting.html), [`SHOW DATABASES`](show-databases.html), [`SHOW GRANTS`](show-grants.html), [`SHOW INDEX`](show-index.html), [`SHOW TABLES`](show-tables.html), [`SHOW USERS`](show-users.html) -- [`UPDATE`](update.html) - -## Query Optimization - -Using `EXPLAIN`'s output, you can optimize your queries by taking the following points into consideration: - -- Queries with fewer levels execute more quickly. Restructuring queries to require fewer levels of processing will generally improve performance. - -- Avoid scanning an entire table, which is the slowest way to access data. You can avoid this by [creating indexes](indexes.html) that contain at least one of the columns that the query is filtering in its `WHERE` clause. - -You can find out if your queries are performing entire table scans by using `EXPLAIN` to see which: - -- Indexes the query uses; shown as the **Description** value of rows with the **Field** value of `table` - -- Key values in the index are being scanned; shown as the **Description** value of rows with the **Field** value of `spans` - -For more information, see [Find the Indexes and Key Ranges a Query Uses](#find-the-indexes-and-key-ranges-a-query-uses). - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/explain.html %} - -## Required Privileges - -The user requires the appropriate [privileges](privileges.html) for the statement being explained. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `EXPRS` | Include the SQL expressions that are involved in each processing stage. | -| `QUALIFY` | Include table names when referencing columns, which might be important to verify the behavior of joins across tables with the same column names.

To list qualified names, `QUALIFY` requires you to include the `EXPRS` option. | -| `METADATA` | Include the columns each level uses in the **Columns** column, as well as **Ordering** detail. | -| `VERBOSE` | Imply the `EXPRS`, `METADATA`, and `QUALIFY` options. | -| `TYPES` | Include the intermediate [data types](data-types.html) CockroachDB chooses to evaluate intermediate SQL expressions.

`TYPES` also implies the `METADATA` and `EXPRS` options.| -| `explainable_stmt` | The [statement](#explainable-statements) you want details about. | - -{{site.data.alerts.callout_danger}}EXPLAIN also has other modes besides query plans; they are useful only to CockroachDB developers and are not documented here.{{site.data.alerts.end}} - -## Success Responses - -Successful `EXPLAIN` statements return tables with the following columns: - -| Column | Description | -|-----------|-------------| -| **Level** | The level of hierarchy of the query plan.

`0` represents the last processing stage, which produces the results sent to the SQL client; the highest level represents the operation at the key-value layer that accesses the data.

The query plan has a tree structure, so it is possible to see multiple processing stages at the same level; this indicates that they are sibling stages feeding data to the processing stage at the next lower level.| -| **Type** | The query plan node's type, which is described in the [CockroachDB source on GitHub](https://github.com/cockroachdb/cockroach/pull/10055/files#diff-542aa8b21b245d1144c920577333ceedR764). | -| **Field** | The type of parameter being used by the query plan node. | -| **Description** | Additional information describing the **Field** value. | -| **Columns** | The columns provided to the processes at lower levels of the hierarchy.

This column displays only if the `METADATA` option is specified or implied. | -| **Ordering** | The order in which results are presented to the processes at lower levels of the hierarchy.

This column displays only if the `METADATA` option is specified or implied. | - -## Examples - -### Default Query Plans - -By default, `EXPLAIN` includes the least detail about the query plan but can be useful to find out which indexes and keys a query uses. - -~~~ sql -> EXPLAIN SELECT * FROM kv WHERE v > 3 ORDER BY v; -~~~ -~~~ -+-------+------+-------+-------------+ -| Level | Type | Field | Description | -+-------+------+-------+-------------+ -| 0 | sort | | | -| 0 | | order | +v | -| 1 | scan | | | -| 1 | | table | kv@primary | -| 1 | | spans | ALL | -+-------+------+-------+-------------+ -~~~ - -### `EXPRS` Option - -The `EXPRS` option includes SQL expressions that are involved in each processing stage, providing more granular detail about which portion of your query is represented at each level. - -~~~ sql -> EXPLAIN (EXPRS) SELECT * FROM kv WHERE v > 3 ORDER BY v; -~~~ -~~~ -+-------+------+--------+-------------+ -| Level | Type | Field | Description | -+-------+------+--------+-------------+ -| 0 | sort | | | -| 0 | | order | +v | -| 1 | scan | | | -| 1 | | table | kv@primary | -| 1 | | spans | ALL | -| 1 | | filter | v > 3 | -+-------+------+--------+-------------+ -~~~ - -### `METADATA` Option - -The `METADATA` option includes detail about which columns are being used by each level, as well as how columns are being ordered. - -~~~ sql -> EXPLAIN (METADATA) SELECT * FROM kv WHERE v > 3 ORDER BY v; -~~~ -~~~ -+-------+------+-------+-------------+---------+--------------+ -| Level | Type | Field | Description | Columns | Ordering | -+-------+------+-------+-------------+---------+--------------+ -| 0 | sort | | | (k, v) | +v | -| 0 | | order | +v | | | -| 1 | scan | | | (k, v) | +k,+v,unique | -| 1 | | table | kv@primary | | | -| 1 | | spans | ALL | | | -+-------+------+-------+-------------+---------+--------------+ -~~~ - -When looking at the **Ordering** column, we can also sort by descending (`DESC`) values of `k`, which is indicated by the `-` sign. - -~~~ sql -> EXPLAIN (METADATA) SELECT * FROM kv WHERE v > 3 ORDER BY v DESC; -~~~ -~~~ -+-------+------+-------+-------------+---------+--------------+ -| Level | Type | Field | Description | Columns | Ordering | -+-------+------+-------+-------------+---------+--------------+ -| 0 | sort | | | (k, v) | -v | -| 0 | | order | -v | | | -| 1 | scan | | | (k, v) | +k,+v,unique | -| 1 | | table | kv@primary | | | -| 1 | | spans | ALL | | | -+-------+------+-------+-------------+---------+--------------+ -~~~ - -{{site.data.alerts.callout_info}}In some cases the Ordering details report a column ordering with an equal sign (e.g., =k). This is a side effect of the internal ordering analysis performed by CockroachDB and merely indicates that CockroachDB has found that only one row matches a WHERE expression.{{site.data.alerts.end}} - -### `QUALIFY` Option - -`QUALIFY` uses `
.` notation for columns in the query plan. However, `QUALIFY` must be used with `EXPRS` to show the SQL values used. - -~~~ sql -> EXPLAIN (EXPRS, QUALIFY) SELECT a.v, b.v FROM t.kv AS a, t.kv AS b; -~~~ -~~~ -+-------+--------+----------+-------------+ -| Level | Type | Field | Description | -+-------+--------+----------+-------------+ -| 0 | render | | | -| 0 | | render 0 | a.v | -| 0 | | render 1 | b.v | -| 1 | join | | | -| 1 | | type | cross | -| 2 | scan | | | -| 2 | | table | kv@primary | -| 2 | scan | | | -| 2 | | table | kv@primary | -+-------+--------+----------+-------------+ -~~~ - -You can contrast this with the same statement not including the `QUALIFY` option to see that the column references are not qualified, which can lead to ambiguity if multiple tables have columns with the same names. - -~~~ sql -> EXPLAIN (EXPRS) SELECT a.v, b.v FROM t.kv AS a, t.kv AS b; -~~~ -~~~ -+-------+--------+----------+-------------+ -| Level | Type | Field | Description | -+-------+--------+----------+-------------+ -| 0 | render | | | -| 0 | | render 0 | v | -| 0 | | render 1 | v | -| 1 | join | | | -| 1 | | type | cross | -| 2 | scan | | | -| 2 | | table | kv@primary | -| 2 | scan | | | -| 2 | | table | kv@primary | -+-------+--------+----------+-------------+ -~~~ - -### `VERBOSE` Option - -The `VERBOSE` option implies the `EXPRS`, `METADATA`, and `QUALIFY` options. - -~~~ sql -> EXPLAIN (VERBOSE) SELECT * FROM kv AS a JOIN kv USING (k) WHERE a.v > 3 ORDER BY a.v DESC; -~~~ -~~~ -+-------+--------+----------+-------------+-------------------------------------------------+--------------+ -| Level | Type | Field | Description | Columns | Ordering | -+-------+--------+----------+-------------+-------------------------------------------------+--------------+ -| 0 | sort | | | (k, v, v) | -v | -| 0 | | order | -v | | | -| 1 | render | | | (k, v, v) | | -| 1 | | render 0 | k | | | -| 1 | | render 1 | a.v | | | -| 1 | | render 2 | bank.kv.v | | | -| 2 | join | | | (k, k[hidden,omitted], v, k[hidden,omitted], v) | | -| 2 | | type | inner | | | -| 2 | | equality | (k) = (k) | | | -| 3 | scan | | | (k, v) | +k,+v,unique | -| 3 | | table | kv@primary | | | -| 3 | | spans | ALL | | | -| 3 | | filter | v > 3 | | | -| 3 | scan | | | (k, v) | +k,+v,unique | -| 3 | | table | kv@primary | | | -+-------+--------+----------+-------------+-------------------------------------------------+--------------+ -~~~ - -### `TYPES` Option - -The `TYPES` mode includes the types of the values used in the query plan, as well as implying the `METADATA` and `EXPRS` options. - -~~~ sql -> EXPLAIN (TYPES) SELECT * FROM kv WHERE v > 3 order by v; -~~~ -~~~ -+-------+------+--------+-----------------------------+----------------+--------------+ -| Level | Type | Field | Description | Columns | Ordering | -+-------+------+--------+-----------------------------+----------------+--------------+ -| 0 | sort | | | (k int, v int) | +v | -| 0 | | order | +v | | | -| 1 | scan | | | (k int, v int) | +k,+v,unique | -| 1 | | table | kv@primary | | | -| 1 | | spans | ALL | | | -| 1 | | filter | ((v)[int] > (3)[int])[bool] | | | -+-------+------+--------+-----------------------------+----------------+--------------+ -~~~ - -### Find the Indexes and Key Ranges a Query Uses - -You can use `EXPLAIN` to understand which indexes and key ranges queries use, which can help you ensure a query isn't performing a full table scan. 
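In the output, the fields to check are `table`, which shows the index being read, and `spans`, which shows how much of that index is scanned (`ALL` means a full scan; a bounded range such as `/4-/6` means only part of the key space is read). The walkthrough below demonstrates both cases on a small `kv` table. As an extra sketch that is not part of the original walkthrough (the table and index here are illustrative), a secondary index on the filtered column is what lets a covered query avoid the full scan:

~~~ sql
> CREATE TABLE scores (id INT PRIMARY KEY, score INT, INDEX (score));

> EXPLAIN SELECT score FROM scores WHERE score BETWEEN 10 AND 20;
~~~

Because the query filters on `score` and reads only that column, the plan should report the secondary index in the `table` field and a bounded range in the `spans` field rather than `ALL`.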
- -~~~ sql -> CREATE TABLE kv (k INT PRIMARY KEY, v INT); -~~~ - -Because column `v` is not indexed, queries filtering on it alone scan the entire table: - -~~~ sql -> EXPLAIN SELECT * FROM kv WHERE v BETWEEN 4 AND 5; -~~~ -~~~ -+-------+------+-------+-------------+ -| Level | Type | Field | Description | -+-------+------+-------+-------------+ -| 0 | scan | | | -| 0 | | table | kv@primary | -| 0 | | spans | ALL | -+-------+------+-------+-------------+ -~~~ - -However, in the following query, column `k` is sorted in the `primary` index, so CockroachDB can avoid scanning the entire table: - -~~~ sql -> EXPLAIN SELECT * FROM kv WHERE k BETWEEN 4 AND 5; -~~~ -~~~ -+-------+------+-------+-------------+ -| Level | Type | Field | Description | -+-------+------+-------+-------------+ -| 0 | scan | | | -| 0 | | table | kv@primary | -| 0 | | spans | /4-/6 | -+-------+------+-------+-------------+ -~~~ - -## See Also - -- [`ALTER TABLE`](alter-table.html) -- [`CREATE DATABASE`](create-database.html) -- [`CREATE TABLE`](create-table.html) -- [`DELETE`](delete.html) -- [Indexes](indexes.html) -- [`INSERT`](insert.html) -- [`SELECT`](select.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) -- [`SHOW CREATE TABLE`](show-create-table.html) -- [`UPDATE`](update.html) diff --git a/src/current/v1.0/explore-the-admin-ui.md b/src/current/v1.0/explore-the-admin-ui.md deleted file mode 100644 index fd1e260faef..00000000000 --- a/src/current/v1.0/explore-the-admin-ui.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Explore the Admin UI -toc: true -feedback: false ---- - -CockroachDB's Admin UI provides detail about your cluster's performance and health. - - -## Access the Admin UI - -You can access the UI from any node in the cluster, each of which will show nearly identical data. - -By default, you can access it via HTTP on port `8080` of whatever value you used for the node's `--host` value. For example, `http://:8080`. - -However, you can also set the CockroachDB Admin UI to a custom port using `--http-port` or a custom hostname using `--http-host` when [starting each node](start-a-node.html) (i.e., each node's values are dependent on how it's started; there is no cluster-level configuration for non-default values). For example, if you set both a custom port and hostname, `http://:`. - -For additional guidance on accessing the Admin UI, see [Start a Local Cluster](start-a-local-cluster.html) and [Manual Deployment](manual-deployment.html). - -## More Info -*Additional docs coming soon.* \ No newline at end of file diff --git a/src/current/v1.0/file-an-issue.md b/src/current/v1.0/file-an-issue.md deleted file mode 100644 index 6151e1d21ae..00000000000 --- a/src/current/v1.0/file-an-issue.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: File an Issue -summary: Learn how to file a GitHub issue with CockroachDB. -toc: false ---- - -If you've tried to [troubleshoot](troubleshooting-overview.html) an issue yourself, have [reached out for help](support-resources.html), and are still stumped, you can file an issue in GitHub. - -To file an issue in GitHub, we need the following information: - -1. A summary of the issue. - -2. The steps to reproduce the issue. - -3. The result you expected. - -4. The result that actually occurred. - -5. The first few lines of the log file from each node in the cluster in a timeframe as close as possible to reproducing the issue. 
On most Unix-based systems running with defaults, you can get this information using the following command: - - ~~~ shell - $ grep -F '[config]' cockroach-data/logs/cockroach.log - ~~~~ - {{site.data.alerts.callout_info}}You might need to replace cockroach-data/logs with the location of your logs.{{site.data.alerts.end}} - If the logs are not available, please include the output of `cockroach version` for each node in the cluster. - -### Template - -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): - -~~~ - -## Summary - - - -## Steps to reproduce - -1. -2. -3. - -## Expected Result - - - -## Actual Result - - - -## Log files/version - -### Node 1 - - - -### Node 2 - - - -### Node 3 - - - -~~~ diff --git a/src/current/v1.0/float.md b/src/current/v1.0/float.md deleted file mode 100644 index f85c65858fe..00000000000 --- a/src/current/v1.0/float.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: FLOAT -summary: The FLOAT data type stores inexact, floating-point numbers with up to 17 digits in total and at least one digit to the right of the decimal point. -toc: true ---- - -The `FLOAT` [data type](data-types.html) stores inexact, floating-point numbers with up to 17 digits of decimal precision. - -They are handled internally using the [standard double-precision -(64-bit binary-encoded) IEEE754 format](https://en.wikipedia.org/wiki/IEEE_floating_point). - - -## Aliases - -In CockroachDB, the following are aliases for `FLOAT`: - -- `REAL` -- `DOUBLE PRECISION` - -## Syntax - -A constant value of type `FLOAT` can be entered as a [numeric literal](sql-constants.html#numeric-literals). -For example: `1.414` or `-1234`. - -The special IEEE754 values for positive infinity, negative infinity -and Not A Number (NaN) cannot be entered using numeric literals -directly and must be converted using an -[interpreted literal](sql-constants.html#interpreted-literals) or an -[explicit conversion](sql-expressions.html#explicit-type-coercions) from -a string literal instead. For example: - -- `FLOAT '+Inf'` -- `'-Inf'::FLOAT` -- `CAST('NaN' AS FLOAT)` - -## Size - -A `FLOAT` column supports values up to 8 bytes in width, but the total storage size is likely to be larger due to CockroachDB metadata. - -## Examples - -~~~ sql -> CREATE TABLE floats (a FLOAT PRIMARY KEY, b REAL, c DOUBLE PRECISION); - -> SHOW COLUMNS FROM floats; -~~~ -~~~ -+-------+-------+-------+---------+ -| Field | Type | Null | Default | -+-------+-------+-------+---------+ -| a | FLOAT | false | NULL | -| b | FLOAT | true | NULL | -| C | FLOAT | true | NULL | -+-------+-------+-------+---------+ -~~~ -~~~ sql -> INSERT INTO floats VALUES (1.012345678901, 2.01234567890123456789, CAST('+Inf' AS FLOAT)); - -> SELECT * FROM floats; -~~~ -~~~ -+----------------+--------------------+------+ -| a | b | c | -+----------------+--------------------+------+ -| 1.012345678901 | 2.0123456789012346 | +Inf | -+----------------+--------------------+------+ -# Note that the value in "b" has been limited to 17 digits. -~~~ - -## Supported Casting & Conversion - -`FLOAT` values can be [cast](data-types.html#data-type-conversions-casts) to any of the following data types: - -Type | Details ------|-------- -`INT` | Truncates decimal precision and requires values to be between -2^63 and 2^63-1 -`DECIMAL` | Causes an error to be reported if the value is NaN or +/- Inf. 
-`BOOL` | **0** converts to `false`; all other values convert to `true` -`STRING` | -- - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/foreign-key.md b/src/current/v1.0/foreign-key.md deleted file mode 100644 index 8bd51e52440..00000000000 --- a/src/current/v1.0/foreign-key.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: Foreign Key Constraint -summary: The Foreign Key constraint specifies a column can contain only values exactly matching existing values from the column it references. -toc: true ---- - -The Foreign Key [constraint](constraints.html) specifies that all of a column's values must exactly match existing values from the column it references, enforcing referential integrity. - -For example, if you create a foreign key on `orders.customer` that references `customers.id`: - -- Each value inserted or updated in `orders.customer` must exactly match a value in `customers.id`. -- Values in `customers.id` that are referenced by `orders.customer` cannot be deleted or updated. However, `customers.id` values that _aren't_ present in `orders.customer` can be. - -{{site.data.alerts.callout_success}}If you plan to use Foreign Keys in your schema, consider using interleaved tables, which can dramatically improve query performance.{{site.data.alerts.end}} - - -## Details - -### Rules for Creating Foreign Keys - -**Foreign Key Columns** - -- Foreign key columns must use their referenced column's [type](data-types.html). -- Each column cannot belong to more than 1 Foreign Key constraint. -- Foreign key columns must be [indexed](indexes.html). This is required because updates and deletes on the referenced table will need to search the referencing table for any matching records to ensure those operations would not violate existing references. In practice, such indexes are likely also needed by applications using these tables, since finding all records which belong to some entity, for example all orders for a given customer, is very common. - - To meet this requirement when creating a new table, there are a few options: - - Create indexes explicitly using the [`INDEX`](create-table.html#create-a-table-with-secondary-indexes) clause of `CREATE TABLE`. - - Rely on indexes created by the [Primary Key](primary-key.html) or [Unique](unique.html) constraints. - - Have CockroachDB automatically create an index of the foreign key columns for you. However, it's important to note that if you later remove the Foreign Key constraint, this automatically created index _is not_ removed. - - Using the foreign key columns as the prefix of an index's columns also satisfies the requirement for an index. For example, if you create foreign key columns `(A, B)`, an index of columns `(A, B, C)` satisfies the requirement for an index. - - To meet this requirement when adding the Foreign Key constraint to an existing table, if the columns you want to constrain are not already indexed, use [`CREATE INDEX`](create-index.html) to index them and only then use the [`ADD CONSTRAINT`](add-constraint.html) statement to add the Foreign Key constraint to the columns. - -**Referenced Columns** - -- Referenced columns must contain only unique sets of values. This means the `REFERENCES` clause must use exactly the same columns as a [Unique](unique.html) or [Primary Key](primary-key.html) constraint on the referenced table. For example, the clause `REFERENCES tbl (C, D)` requires `tbl` to have either the constraint `UNIQUE (C, D)` or `PRIMARY KEY (C, D)`. 
-- In the `REFERENCES` clause, if you specify a table but no columns, CockroachDB references the table's primary key. In these cases, the Foreign Key constraint and the referenced table's primary key must contain the same number of columns. - -### _NULL_ Values - -Single-column foreign keys accept _NULL_ values. - -Multiple-column foreign keys only accept _NULL_ values in these scenarios: - -- The row you're ultimately referencing—determined by the statement's other values—contains _NULL_ as the value of the referenced column (i.e., _NULL_ is valid from the perspective of referential integrity) -- The write contains _NULL_ values for all foreign key columns - -For example, if you have a Foreign Key constraint on columns `(A, B)` and try to insert `(1, NULL)`, the write would fail unless the row with the value `1` for `(A)` contained a _NULL_ value for `(B)`. However, inserting `(NULL, NULL)` would succeed. - -However, allowing _NULL_ values in either your foreign key or referenced columns can degrade their referential integrity. To avoid this, you can use the [Not Null constraint](not-null.html) on both sets of columns when [creating your tables](create-table.html). (The Not Null constraint cannot be added to existing tables.) - -### Performance - -Because the Foreign Key constraint requires per-row checks on two tables, statements involving foreign key or referenced columns can take longer to execute. You're most likely to notice this with operations like bulk inserts into the table with the foreign keys. - -We're currently working to improve the performance of these statements, though. - -{{site.data.alerts.callout_success}}You can improve the performance of some statements that use Foreign Keys by also using INTERLEAVE IN PARENT.{{site.data.alerts.end}} - -## Syntax - -Foreign Key constraints can be defined at the [table level](#table-level). However, if you only want the constraint to apply to a single column, it can be applied at the [column level](#column-level). - -{{site.data.alerts.callout_info}}You can also add the Foreign Key constraint to existing tables through ADD CONSTRAINT.{{site.data.alerts.end}} - -### Column Level - -{% include {{ page.version.version }}/sql/diagrams/foreign_key_column_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_name` | The name of the foreign key column. | -| `column_type` | The foreign key column's [data type](data-types.html). | -| `parent_table` | The name of the table the foreign key references. | -| `ref_column_name` | The name of the column the foreign key references.

If you do not include the `ref_column_name` you want to reference from the `parent_table`, CockroachDB uses the first column of `parent_table`'s primary key. -| `column_constraints` | Any other column-level [constraints](constraints.html) you want to apply to this column. | -| `column_def` | Definitions for any other columns in the table. | -| `table_constraints` | Any table-level [constraints](constraints.html) you want to apply. | - -**Example** - -~~~ sql -> CREATE TABLE IF NOT EXISTS orders ( - id INT PRIMARY KEY, - customer INT NOT NULL REFERENCES customers (id), - orderTotal DECIMAL(9,2), - INDEX (customer) - ); -~~~ - -### Table Level - -{% include {{ page.version.version }}/sql/diagrams/foreign_key_table_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_def` | Definitions for the table's columns. | -| `name` | The name of the constraint. | -| `fk_column_name` | The name of the foreign key column. | -| `parent_table` | The name of the table the foreign key references. | -| `ref_column_name` | The name of the column the foreign key references.

If you do not include the `column_name` you want to reference from the `parent_table`, CockroachDB uses the first column of `parent_table`'s primary key. -| `table_constraints` | Any other table-level [constraints](constraints.html) you want to apply. | - -**Example** - -~~~ sql -CREATE TABLE packages ( - customer INT, - "order" INT, - id INT, - address STRING(50), - delivered BOOL, - delivery_date DATE, - PRIMARY KEY (customer, "order", id), - CONSTRAINT fk_order FOREIGN KEY (customer, "order") REFERENCES orders - ) INTERLEAVE IN PARENT orders (customer, "order") - ; -~~~ - -## Usage Example - -~~~ sql -> CREATE TABLE customers (id INT PRIMARY KEY, email STRING UNIQUE); - -> CREATE TABLE IF NOT EXISTS orders ( - id INT PRIMARY KEY, - customer INT NOT NULL REFERENCES customers (id), - orderTotal DECIMAL(9,2), - INDEX (customer) - ); - -> INSERT INTO customers VALUES (1001, 'a@co.tld'); - -> INSERT INTO orders VALUES (1, 1002, 29.99); -~~~ -~~~ -pq: foreign key violation: value [1002] not found in customers@primary [id] -~~~ -~~~ sql -> INSERT INTO orders VALUES (1, 1001, 29.99); - -> UPDATE customers SET id = 1002 WHERE id = 1001; -~~~ -~~~ -pq: foreign key violation: value(s) [1001] in columns [id] referenced in table "orders" -~~~ -~~~ sql -> DELETE FROM customers WHERE id = 1001; -~~~ -~~~ -pq: foreign key violation: value(s) [1001] in columns [id] referenced in table "orders" -~~~ - -## See Also - -- [Constraints](constraints.html) -- [`DROP CONSTRAINT`](drop-constraint.html) -- [`ADD CONSTRAINT`](add-constraint.html) -- [Check constraint](check.html) -- [Default Value constraint](default-value.html) -- [Not Null constraint](not-null.html) -- [Primary Key constraint](primary-key.html) -- [Unique constraint](unique.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) - diff --git a/src/current/v1.0/frequently-asked-questions.md b/src/current/v1.0/frequently-asked-questions.md deleted file mode 100644 index 9e871d58759..00000000000 --- a/src/current/v1.0/frequently-asked-questions.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Frequently Asked Questions -summary: CockroachDB FAQ - What is CockroachDB? How does it work? What makes it different from other databases? -tags: postgres, cassandra, google cloud spanner -toc: true ---- - - -## What is CockroachDB? - -CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. - -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and it's completely [open source](https://github.com/cockroachdb/cockroach). - -## When is CockroachDB a good choice? - -CockroachDB is well suited for applications that require reliable, available, and correct data regardless of scale. It is built to automatically replicate, rebalance, and recover with minimal configuration and operational overhead. Specific use cases include: - -- Distributed or replicated OLTP -- Multi-datacenter deployments -- Infrastructure initiatives built for the cloud - -## When is CockroachDB not a good choice? 
- -CockroachDB is not a good choice when very low latency reads and writes are critical; use an in-memory database instead. - -Also, CockroachDB is not yet suitable for: - -- Complex SQL JOINS ([the feature still needs optimization](https://www.cockroachlabs.com/blog/cockroachdbs-first-join/)) -- Heavy analytics / OLAP - -## How easy is it to install CockroachDB? - -It's as easy as downloading a binary on OS X and Linux or running our official Docker image on Windows. There are other simple install methods as well, such as running our Homebrew recipe on OS X or building from source files on both OS X and Linux. - -For more details, see [Install CockroachDB](install-cockroachdb.html). - -## How does CockroachDB scale? - -CockroachDB scales horizontally with minimal operator overhead. You can run it on your local computer, a single server, a corporate development cluster, or a private or public cloud. [Adding capacity](start-a-node.html) is as easy as pointing a new node at the running cluster. - -At the key-value level, CockroachDB starts off with a single, empty range. As you put data in, this single range eventually reaches a threshold size (64MB by default). When that happens, the data splits into two ranges, each covering a contiguous segment of the entire key-value space. This process continues indefinitely; as new data flows in, existing ranges continue to split into new ranges, aiming to keep a relatively small and consistent range size. - -When your cluster spans multiple nodes (physical machines, virtual machines, or containers), newly split ranges are automatically rebalanced to nodes with more capacity. CockroachDB communicates opportunities for rebalancing using a peer-to-peer [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) by which nodes exchange network addresses, store capacity, and other information. - -## How does CockroachDB survive failures? - -CockroachDB is designed to survive software and hardware failures, from server restarts to datacenter outages. This is accomplished without confusing artifacts typical of other distributed systems (e.g., stale reads) using strongly-consistent replication as well as automated repair after failures. - -**Replication** - -CockroachDB replicates your data for availability and guarantees consistency between replicas using the [Raft consensus algorithm](https://raft.github.io/), a popular alternative to Paxos. You can [define the location of replicas](configure-replication-zones.html) in various ways, depending on the types of failures you want to secure against and your network topology. You can locate replicas on: - -- Different servers within a rack to tolerate server failures -- Different servers on different racks within a datacenter to tolerate rack power/network failures -- Different servers in different datacenters to tolerate large scale network or power outages - -When replicating across datacenters, be aware that the round-trip latency between datacenters will have a direct effect on your database's performance. Latency in cross-continent clusters will be noticeably worse than in clusters where all nodes are geographically close together. - -**Automated Repair** - -For short-term failures, such as a server restart, CockroachDB uses Raft to continue seamlessly as long as a majority of replicas remain available. 
Raft makes sure that a new “leader” for each group of replicas is elected if the former leader fails, so that transactions can continue and affected replicas can rejoin their group once they’re back online. For longer-term failures, such as a server/rack going down for an extended period of time or a datacenter outage, CockroachDB automatically rebalances replicas from the missing nodes, using the unaffected replicas as sources. Using capacity information from the gossip network, new locations in the cluster are identified and the missing replicas are re-replicated in a distributed fashion using all available nodes and the aggregate disk and network bandwidth of the cluster. - -## How is CockroachDB strongly-consistent? - -CockroachDB guarantees the SQL isolation level "serializable", the highest defined by the SQL standard. -It does so by combining the Raft consensus algorithm for writes and a custom time-based synchronization algorithms for reads. -See our description of [strong consistency](strong-consistency.html) for more details. - -## How is CockroachDB both highly available and strongly consistent? - -The [CAP theorem](https://en.wikipedia.org/wiki/CAP_theorem) states that it is impossible for a distributed system to simultaneously provide more than two out of the following three guarantees: - -- Consistency -- Availability -- Partition Tolerance - -CockroachDB is a CP (consistent and partition tolerant) system. This means -that, in the presence of partitions, the system will become unavailable rather than do anything which might cause inconsistent results. For example, writes require acknowledgements from a majority of replicas, and reads require a lease, which can only be transferred to a different node when writes are possible. - -Separately, CockroachDB is also Highly Available, although "available" here means something different than the way it is used in the CAP theorem. In the CAP theorem, availability is a binary property, but for High Availability, we talk about availability as a spectrum (using terms like "five nines" for a system that is available 99.999% of the time). - -Being both CP and HA means that whenever a majority of replicas can talk to each other, they should be able to make progress. For example, if you deploy CockroachDB to three datacenters and the network link to one of them fails, the other two datacenters should be able to operate normally with only a few seconds' disruption. We do this by attempting to detect partitions and failures quickly and efficiently, transferring leadership to nodes that are able to communicate with the majority, and routing internal traffic away from nodes that are partitioned away. - -## Why is CockroachDB SQL? - -At the lowest level, CockroachDB is a distributed, strongly-consistent, transactional key-value store, but the external API is Standard SQL with extensions. This provides developers familiar relational concepts such as schemas, tables, columns, and indexes and the ability to structure, manipulate, and query data using well-established and time-proven tools and processes. Also, since CockroachDB supports the PostgreSQL wire protocol, it’s simple to get your application talking to Cockroach; just find your [PostgreSQL language-specific driver](install-client-drivers.html) and start building. - -For more details, learn our [basic CockroachDB SQL statements](learn-cockroachdb-sql.html), explore the [full SQL grammar](sql-grammar.html), and try it out via our [built-in SQL client](use-the-built-in-sql-client.html). 
Also, to understand how CockroachDB maps SQL table data to key-value storage and how CockroachDB chooses the best index for running a query, see [SQL in CockroachDB](https://www.cockroachlabs.com/blog/sql-in-cockroachdb-mapping-table-data-to-key-value-storage/) and [Index Selection in CockroachDB](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/). - -## Does CockroachDB support distributed transactions? - -Yes. CockroachDB distributes transactions across your cluster, whether it’s a few servers in a single location or many servers across multiple datacenters. Unlike with sharded setups, you don’t need to know the precise location of data; you just talk to any node in your cluster and CockroachDB gets your transaction to the right place seamlessly. Distributed transactions proceed without downtime or additional latency while rebalancing is underway. You can even move tables – or entire databases – between data centers or cloud infrastructure providers while the cluster is under load. - -## Do transactions in CockroachDB guarantee ACID semantics? - -Yes. Every [transaction](transactions.html) in CockroachDB guarantees [ACID semantics](https://en.wikipedia.org/wiki/ACID) spanning arbitrary tables and rows, even when data is distributed. - -- **Atomicity:** Transactions in CockroachDB are “all or nothing.” If any part of a transaction fails, the entire transaction is aborted, and the database is left unchanged. If a transaction succeeds, all mutations are applied together with virtual simultaneity. For a detailed discussion of atomicity in CockroachDB transactions, see [How CockroachDB Distributes Atomic Transactions](https://www.cockroachlabs.com/blog/how-cockroachdb-distributes-atomic-transactions/). -- **Consistency:** SQL operations never see any intermediate states and move the database from one valid state to another, keeping indexes up to date. Operations always see the results of previously completed statements on overlapping data and maintain specified constraints such as unique columns. For a detailed look at how we've tested CockroachDB for correctness and consistency, see [DIY Jepsen Testing of CockroachDB](https://www.cockroachlabs.com/blog/diy-jepsen-testing-cockroachdb/). -- **Isolation:** By default, transactions in CockroachDB use serializable snapshot isolation (SSI). This means that even concurrent read-write transactions will never result in anomalies. We also provide snapshot isolation (SI), which is more performant with high-contention workloads, although it exhibits anomalies not present in SSI (write skew). For a detailed discussion of isolation in CockroachDB transactions, see [Serializable, Lockless, Distributed: Isolation in CockroachDB](https://www.cockroachlabs.com/blog/serializable-lockless-distributed-isolation-cockroachdb/). -- **Durability:** In CockroachDB, every acknowledged write has been persisted consistently on a majority of replicas (by default, at least 2) via the [Raft consensus algorithm](https://raft.github.io/). Power or disk failures that affect only a minority of replicas (typically 1) do not prevent the cluster from operating and do not lose any data. - -## Since CockroachDB is inspired by Spanner, does it require atomic clocks to synchronize time? - -No. CockroachDB was designed to work without atomic clocks or GPS clocks. 
It’s an open source database intended to be run on arbitrary collections of nodes, from physical servers in a corp development cluster to public cloud infrastructure using the flavor-of-the-month virtualization layer. It’d be a showstopper to require an external dependency on specialized hardware for clock synchronization. However, CockroachDB does require moderate levels of clock synchronization for correctness. If clocks drift past a maximum threshold, nodes will be taken offline. It's therefore highly recommended to run [NTP](http://www.ntp.org/) or other clock synchronization software on each node. - -For more details on how CockroachDB handles unsynchronized clocks, see [Clock Synchronization](recommended-production-settings.html#clock-synchronization). And for a broader discussion of clocks, and the differences between clocks in Spanner and CockroachDB, see [Living Without Atomic Clocks](https://www.cockroachlabs.com/blog/living-without-atomic-clocks/). - -## What languages can I use to work with CockroachDB? - -CockroachDB supports the PostgreSQL wire protocol, so you can use any available PostgreSQL client drivers. We've tested it from the following languages: - -- Go -- Python -- Ruby -- Java -- JavaScript (node.js) -- C++/C -- Clojure -- PHP -- Rust - -See [Install Client Drivers](install-client-drivers.html) for more details. - -## Why does CockroachDB use the PostgreSQL wire protocol instead of the MySQL protocol? - -CockroachDB uses the PostgreSQL wire protocol because it is better documented than the MySQL protocol, and because PostgreSQL has a liberal Open Source license, similar to BSD or MIT licenses, whereas MySQL has the more restrictive GNU General Public License. - -Note, however, that the protocol used doesn't significantly impact how easy it is to port applications. Swapping out SQL network drivers is rather straightforward in nearly every language. What makes it hard to move from one database to another is the dialect of SQL in use. CockroachDB's dialect is based on PostgreSQL as well. - -## What is CockroachDB’s security model? - -You can run a secure or insecure CockroachDB cluster. When secure, client/node and inter-node communication is encrypted, and SSL certificates authenticate the identity of both clients and nodes. When insecure, there's no encryption or authentication. - -Also, CockroachDB supports common SQL privileges on databases and tables. The `root` user has privileges for all databases, while unique users can be granted privileges for specific statements at the database and table-levels. - -For more details, see our documentation on [privileges](privileges.html) and the [`GRANT`](grant.html) statement. - -## How does CockroachDB compare to MySQL or PostgreSQL? - -While all of these databases support SQL syntax, CockroachDB is the only one that scales easily (without the manual complexity of sharding), rebalances and repairs itself automatically, and distributes transactions seamlessly across your cluster. - -For more insight, see [CockroachDB in Comparison](cockroachdb-in-comparison.html). - -## How does CockroachDB compare to Cassandra, HBase, MongoDB, or Riak? - -While all of these are distributed databases, only CockroachDB supports distributed transactions and provides strong consistency. Also, these other databases provide custom APIs, whereas CockroachDB offers standard SQL with extensions. - -For more insight, see [CockroachDB in Comparison](cockroachdb-in-comparison.html). 
- -## Can a MySQL or PostgreSQL application be migrated to CockroachDB? - -The current version of CockroachDB is intended for use with new applications. The initial subset of SQL we support is small relative to the extensive standard, and every popular database implements its own set of extensions and exhibits a unique set of idiosyncrasies. This makes porting an existing application non-trivial unless it is only a very lightweight consumer of SQL functionality. - -## Does Cockroach Labs offer a cloud database as a service? - -Not yet, but this is on our long-term roadmap. - -## Can I use CockroachDB as a key-value store? - -{% include {{ page.version.version }}/faq/simulate-key-value-store.html %} - -## Have questions that weren’t answered? - -Try searching the rest of our docs for answers or using our other [support resources](support-resources.html), including: - -- [CockroachDB Community Forum](https://forum.cockroachlabs.com) -- [CockroachDB Community Slack](https://cockroachdb.slack.com) -- [StackOverflow](http://stackoverflow.com/questions/tagged/cockroachdb) -- [CockroachDB Support Portal](https://support.cockroachlabs.com) diff --git a/src/current/v1.0/functions-and-operators.md b/src/current/v1.0/functions-and-operators.md deleted file mode 100644 index 8d421e6cfc2..00000000000 --- a/src/current/v1.0/functions-and-operators.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Functions and Operators -summary: CockroachDB supports many built-in functions, aggregate functions, and operators. -toc: true ---- - -## Built-in Functions - -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/release-1.0/docs/generated/sql/functions.md %} - -## Aggregate Functions - -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/release-1.0/docs/generated/sql/aggregates.md %} - -## Operators - -The following table lists all CockroachDB operators from highest to lowest precedence, i.e., the order in which they will be evaluated within a statement. Operators with the same precedence are left associative. This means that those operators are grouped together starting from the left and moving right. 
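For example, because `-` is left associative, an expression with repeated subtraction groups from the left (the literal values here are arbitrary):

~~~ sql
> SELECT 12 - 5 - 3;
~~~

This is evaluated as `(12 - 5) - 3 = 4`, not `12 - (5 - 3) = 10`.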
- -| Order of Precedence | Operator | Name | Operator Arity | -| ------------------- | -------- | ---- | -------------- | -| 1 | `.` | Member field access operator | binary | -| 2 | `::` | Type cast | binary | -| 3 | `-` | Unary minus | unary | -| | `~` | Bitwise not | unary | -| 4 | `^` | Exponentiation | binary | -| 5 | `*` | Multiplication | binary | -| | `/` | Division | binary | -| | `//` | Floor division | binary | -| | `%` | Modulo | binary | -| 6 | `+` | Addition | binary | -| | `-` | Subtraction | binary | -| 7 | `<<` | Bitwise left-shift | binary | -| | `>>` | Bitwise right-shift | binary | -| 8 | `&` | Bitwise and | binary | -| 9 | `#` | Bitwise xor | binary | -| 10 | | | Bitwise or | binary | -| 11 | || | Concatenation | binary | -| 12 | `[NOT] BETWEEN` | Value is [not] within the range specified | binary | -| | `[NOT] IN` | Value is [not] in the set of values specified | binary | -| | `[NOT] LIKE` | Matches [or not] LIKE expression, case sensitive | binary | -| | `[NOT] ILIKE` | Matches [or not] LIKE expression, case insensitive | binary | -| | `[NOT] SIMILAR` | Matches [or not] SIMILAR TO regular expression | binary | -| | `~` | Matches regular expression, case sensitive | binary | -| | `!~` | Does not match regular expression, case sensitive | binary | -| | `~*` | Matches regular expression, case insensitive | binary | -| | `!~*` | Does not match regular expression, case insensitive | binary | -| 13 | `=` | Equal | binary | -| | `<` | Less than | binary | -| | `>` | Greater than | binary | -| | `<=` | Less than or equal to | binary | -| | `>=` | Greater than or equal to | binary | -| | `!=`, `<>` | Not equal | binary | -| 14 | `IS` | Value identity | binary | -| 15 | `NOT` | Logical NOT | unary | -| 16 | `AND` | Logical AND | binary | -| 17 | `OR` | Logical OR | binary | - -### Supported Operations - -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/release-1.0/docs/generated/sql/operators.md %} - - diff --git a/src/current/v1.0/generate-cockroachdb-resources.md b/src/current/v1.0/generate-cockroachdb-resources.md deleted file mode 100644 index b899c9a4eaf..00000000000 --- a/src/current/v1.0/generate-cockroachdb-resources.md +++ /dev/null @@ -1,282 +0,0 @@ ---- -title: Generate CockroachDB Resources -summary: Use cockroach gen to generate command-line interface utlities, such as man pages, and example data. -toc: true ---- - -The `cockroach gen` command can generate command-line interface (CLI) utilities ([`man` pages](https://en.wikipedia.org/wiki/Man_page) and a`bash` autocompletion script), example SQL data suitable to populate test databases, and an HAProxy configuration file for load balancing a running cluster. - - -## Subcommands - -| Subcommand | Usage | -| -----------|------ | -| `man` | Generate man pages for CockroachDB. | -| `autocomplete` | Generate bash autocompletion script for CockroachDB. | -| `example-data` | Generate example SQL data. | -| `haproxy` | Generate an HAProxy config file for a running CockroachDB cluster. 
| -## Synopsis - -~~~ shell -# Generate man pages: -$ cockroach gen man - -# Generate bash autocompletion script: -$ cockroach gen autocomplete - -# Generate example SQL data: -$ cockroach gen example-data intro | cockroach sql -$ cockroach gen example-data startrek | cockroach sql - -# Generate an HAProxy config file for a running cluster: -$ cockroach gen haproxy - -# View help: -$ cockroach gen --help -$ cockroach gen man --help -$ cockroach gen autocomplete --help -$ cockroach gen example-data --help -$ cockroach gen haproxy --help -~~~ - -## Flags - -The `gen` subcommands support the following [general-use](#general) and [logging](#logging) flags. - -### General - -#### `man` - -Flag | Description ------|----------- -`--path` | The path where man pages will be generated.

**Default:** `man/man1` under the current directory - -#### `autocomplete` - -Flag | Description ------|----------- -`--out` | The path where the autocomplete file will be generated.

**Default:** `cockroach.bash` in the current directory - -#### `example-data` - -No flags are supported. See the [Generate Example Data](#generate-example-data) example for guidance. - -#### `haproxy` - -Flag | Description ------|----------- -`--certs-dir` | The path to the [certificate directory](create-security-certificates.html). The directory must contain valid certificates if running in secure mode.

**Env Variable:** `COCKROACH_CERTS_DIR`
**Default:** `${HOME}/.cockroach-certs/` -`--host` | The server host to connect to. This can be the address of any node in the cluster.

**Env Variable:** `COCKROACH_HOST`
**Default:** `localhost` -`--insecure` | Run in insecure mode. If this flag is not set, the `--certs-dir` flag must point to valid certificates.

**Env Variable:** `COCKROACH_INSECURE`
**Default:** `false` -`--out` | The path where the HAProxy config file will be generated.

**Default:** `haproxy.cfg` in the current directory -`--port`
`-p` | The server port to connect to.

**Env Variable:** `COCKROACH_PORT`
**Default:** `26257` - -### Logging - -By default, the `gen` command logs errors to `stderr`. - -If you need to troubleshoot this command's behavior, you can change its [logging behavior](debug-and-error-logs.html). - -## Examples - -### Generate `man` Pages - -~~~ shell -# Generate man pages: -$ cockroach gen man - -# Move the man pages to the man directory: -$ sudo mv man/man1/* /usr/share/man/man1 - -# Access man pages: -$ man cockroach -~~~ - -### Generate a `bash` Autocompletion Script - -~~~ shell -# Generate bash autocompletion script: -$ cockroach gen autocomplete - -# Add the script to your .bashrc and .bash_profle: -$ printf "\n\n#cockroach bash autocomplete\nsource 'cockroach.bash'" >> ~/.bashrc -$ printf "\n\n#cockroach bash autocomplete\nsource 'cockroach.bash'" >> ~/.bash_profile -~~~ - -You can now use `tab` to autocomplete `cockroach` commands. - -### Generate Example Data - -To test out CockroachDB, you can generate an example `startrek` database, which contains 2 tables, `episodes` and `quotes`. - -~~~ shell -# Generate example `startrek` database: -$ cockroach gen example-data startrek | cockroach sql --insecure -~~~ - -~~~ -CREATE DATABASE -SET -DROP TABLE -DROP TABLE -CREATE TABLE -INSERT 79 -CREATE TABLE -INSERT 200 -~~~ - -~~~ shell -# Launch the built-in SQL client to view it: -$ cockroach sql --insecure -~~~ - -~~~ sql -> SHOW TABLES FROM startrek; -~~~ -~~~ -+----------+ -| Table | -+----------+ -| episodes | -| quotes | -+----------+ -(2 rows) -~~~ - -You can also generate an example `intro` database, which contains 1 table, `mytable`, with a hidden message: - -~~~ shell -# Generate example `intro` database: -$ cockroach gen example-data intro | cockroach sql --insecure -~~~ - -~~~ -CREATE DATABASE -SET -DROP TABLE -CREATE TABLE -INSERT 1 -INSERT 1 -INSERT 1 -INSERT 1 -... -~~~ - -~~~ shell -# Launch the built-in SQL client to view it: -$ cockroach sql --insecure -~~~ - -~~~ sql -> SHOW TABLES FROM intro; -~~~ - -~~~ -+---------+ -| Table | -+---------+ -| mytable | -+---------+ -(1 row) -~~~ - -~~~ sql -> SELECT * FROM intro.mytable WHERE (l % 2) = 0; -~~~ - -~~~ -+----+------------------------------------------------------+ -| l | v | -+----+------------------------------------------------------+ -| 0 | !__aaawwmqmqmwwwaas,,_ .__aaawwwmqmqmwwaaa,, | -| 2 | !"VT?!"""^~~^"""??T$Wmqaa,_auqmWBT?!"""^~~^^""??YV^ | -| 4 | ! "?##mW##?"- | -| 6 | ! C O N G R A T S _am#Z??A#ma, Y | -| 8 | ! _ummY" "9#ma, A | -| 10 | ! vm#Z( )Xmms Y | -| 12 | ! .j####mmm#####mm#m##6. | -| 14 | ! W O W ! jmm###mm######m#mmm##6 | -| 16 | ! ]#me*Xm#m#mm##m#m##SX##c | -| 18 | ! dm#||+*$##m#mm#m#Svvn##m | -| 20 | ! :mmE=|+||S##m##m#1nvnnX##; A | -| 22 | ! :m#h+|+++=Xmm#m#1nvnnvdmm; M | -| 24 | ! Y $#m>+|+|||##m#1nvnnnnmm# A | -| 26 | ! O ]##z+|+|+|3#mEnnnnvnd##f Z | -| 28 | ! U D 4##c|+|+|]m#kvnvnno##P E | -| 30 | ! I 4#ma+|++]mmhvnnvq##P` ! | -| 32 | ! D I ?$#q%+|dmmmvnnm##! | -| 34 | ! T -4##wu#mm#pw##7' | -| 36 | ! -?$##m####Y' | -| 38 | ! !! "Y##Y"- | -| 40 | ! | -+----+------------------------------------------------------+ -(21 rows) -~~~ - -### Generate an HAProxy Configuration File - -[HAProxy](http://www.haproxy.org/) is one of the most popular open-source TCP load balancers, and CockroachDB includes a built-in command for generating a configuration file that is preset to work with your running cluster. - -
- - -

- -
-To generate an HAProxy config file for a secure cluster, run the `cockroach gen haproxy` command, specifying the location of [certificate directory](create-security-certificates.html) and the address of any instance running a CockroachDB node: - -~~~ shell -$ cockroach gen haproxy \ ---certs-dir= \ ---host=
\ ---port=26257 -~~~ -
- -
-To generate an HAProxy config file for an insecure cluster, run the `cockroach gen haproxy` command, specifying the address of any instance running a CockroachDB node: - -~~~ shell -$ cockroach gen haproxy --insecure \ ---host=
\ ---port=26257 -~~~ -
- -By default, the generated configuration file is called `haproxy.cfg` and looks as follows, with the `server` addresses pre-populated correctly: - -~~~ shell -global - maxconn 4096 - -defaults - mode tcp - timeout connect 10s - timeout client 1m - timeout server 1m - -listen psql - bind :26257 - mode tcp - balance roundrobin - server cockroach1 :26257 - server cockroach2 :26257 - server cockroach3 :26257 -~~~ - -The file is preset with the minimal [configurations](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html) needed to work with your running cluster: - -Field | Description -------|------------ -`timeout connect`
`timeout client`
`timeout server` | Timeout values that should be suitable for most deployments. -`bind` | The port that HAProxy listens on. This is the port clients will connect to and thus needs to be allowed by your network configuration.

This tutorial assumes HAProxy is running on a separate machine from CockroachDB nodes. If you run HAProxy on the same machine as a node (not recommended), you'll need to change this port, as `26257` is also used for inter-node communication. -`balance` | The balancing algorithm. This is set to `roundrobin` to ensure that connections get rotated amongst nodes (connection 1 on node 1, connection 2 on node 2, etc.). Check the [HAProxy Configuration Manual](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html#4-balance) for details about this and other balancing algorithms. -`server` | For each node in the cluster, this field specifies the interface that the node listens on, i.e., the address passed in the `--host` or `--advertise-host` flag on node startup. - -For more details about these and other configuration settings, see the [HAProxy Configuration Manual](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html). - -## See Also - -- [Other Cockroach Commands](cockroach-commands.html) -- [Manual Deployment](manual-deployment.html) (using HAProxy for load balancing) diff --git a/src/current/v1.0/go-implementation.md b/src/current/v1.0/go-implementation.md deleted file mode 100644 index a8312add77a..00000000000 --- a/src/current/v1.0/go-implementation.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Go Implementation -summary: CockroachDB is built in Go. -toc: false ---- - -The choice of language matters. Speed, stability, maintainability: each of these attributes of the underlying language can impact how quickly CockroachDB evolves and how well it works. Not all languages were created equal. Go is an open source programming language developed primarily at Google as a viable alternative to C++ and Java. - -- Excellent environment for building distributed systems -- Faster compile times -- Garbage collection and type safety provide stability -- Readable, well-documented code encourages open source contributions - -CockroachDB is built in Go - -## See Also - -- [Why Go Was the Right Choice for CockroachDB](https://www.cockroachlabs.com/blog/why-go-was-the-right-choice-for-cockroachdb/) -- [How to Optimize Garbage Collection in Go](https://www.cockroachlabs.com/blog/how-to-optimize-garbage-collection-in-go/) -- [The Cost and Complexity of Cgo](https://www.cockroachlabs.com/blog/the-cost-and-complexity-of-cgo/) -- [Outsmarting Go Dependencies in Testing Code](https://www.cockroachlabs.com/blog/outsmarting-go-dependencies-testing-code/) diff --git a/src/current/v1.0/grant.md b/src/current/v1.0/grant.md deleted file mode 100644 index 29165a41aa8..00000000000 --- a/src/current/v1.0/grant.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: GRANT -summary: The GRANT statement grants user privileges for interacting with specific databases and tables. -toc: true ---- - -The `GRANT` [statement](sql-statements.html) lets you control each [user's](create-and-manage-users.html) SQL [privileges](privileges.html) for interacting with specific databases and tables. - -For privileges required by specific statements, see the documentation for the respective [SQL statement](sql-statements.html). - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/grant.html %} - -## Required Privileges - -The user granting privileges must have the `GRANT` privilege on the target databases or tables. - -## Supported Privileges - -Users can be granted the following privileges. 
Some privileges are applicable both for databases and tables, while other are applicable only for tables (see **Levels** in the table below). - -- When a user is granted privileges for a database, new tables created in the database will inherit the privileges, but the privileges can then be changed. -- When a user is granted privileges for a table, the privileges are limited to the table. -- The `root` user is automatically assigned the `ALL` privilege for new databases and is the only user allowed to create databases. -- For privileges required by specific statements, see the documentation for the respective [SQL statement](sql-statements.html). - -Privilege | Levels -----------|------------ -`ALL` | Database, Table -`CREATE` | Database, Table -`DROP` | Database, Table -`GRANT` | Database, Table -`SELECT` | Table -`INSERT` | Table -`DELETE` | Table -`UPDATE` | Table - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | A comma-separated list of table names. Alternately, to grant privileges to all tables, use `*`. `ON TABLE table.*` grants apply to all existing tables in a database but will not affect tables created after the grant. -`database_name` | A comma-separated list of database names.

Privileges granted on databases will be inherited by any new tables created in the databases, but do not affect existing tables in the database. -`user_name` | A comma-separated list of [users](create-and-manage-users.html). - -## Examples - -### Grant privileges on databases - -~~~ sql -> GRANT CREATE ON DATABASE db1, db2 TO maxroach, betsyroach; - -> SHOW GRANTS ON DATABASE db1, db2; -~~~ - -~~~ shell -+----------+------------+------------+ -| Database | User | Privileges | -+----------+------------+------------+ -| db1 | betsyroach | CREATE | -| db1 | maxroach | CREATE | -| db1 | root | ALL | -| db2 | betsyroach | CREATE | -| db2 | maxroach | CREATE | -| db2 | root | ALL | -+----------+------------+------------+ -(6 rows) -~~~ - -### Grant privileges on specific tables in a database - -~~~ sql -> GRANT DELETE ON TABLE db1.t1, db1.t2 TO betsyroach; - -> SHOW GRANTS ON TABLE db1.t1, db1.t2; -~~~ - -~~~ shell -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | betsyroach | DELETE | -| t1 | root | ALL | -| t2 | betsyroach | DELETE | -| t2 | root | ALL | -+-------+------------+------------+ -(4 rows) -~~~ - -### Grant privileges on all tables in a database - -~~~ sql -> GRANT SELECT ON TABLE db2.* TO henryroach; - -> SHOW GRANTS ON TABLE db2.*; -~~~ - -~~~ shell -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | henryroach | SELECT | -| t1 | root | ALL | -| t2 | henryroach | SELECT | -| t2 | root | ALL | -+-------+------------+------------+ -(4 rows) -~~~ - -## See Also - -- [Privileges](privileges.html) -- [`REVOKE`](revoke.html) -- [`SHOW GRANTS`](show-grants.html) diff --git a/src/current/v1.0/high-availability.md b/src/current/v1.0/high-availability.md deleted file mode 100644 index cd35155b63b..00000000000 --- a/src/current/v1.0/high-availability.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: High Availability -summary: CockroachDB is designed to survive software and hardware failures, from server restarts to datacenter outages. -toc: false ---- - -CockroachDB is designed to survive software and hardware failures, from server restarts to datacenter outages. This is accomplished without confusing artifacts typical of other distributed systems (e.g., stale reads) using strongly-consistent replication as well as automated repair after failures. - -## Replication - -CockroachDB replicates your data for availability and guarantees consistency between replicas using the [Raft consensus algorithm](https://raft.github.io/), a popular alternative to Paxos. You can [define the location of replicas](configure-replication-zones.html) in various ways, depending on the types of failures you want to secure against and your network topology. You can locate replicas on: - -- Different servers within a rack to tolerate server failures -- Different servers on different racks within a datacenter to tolerate rack power/network failures -- Different servers in different datacenters to tolerate large scale network or power outages - -When replicating across datacenters, be aware that the round-trip latency between datacenters will have a direct effect on your database's performance. Latency in cross-continent clusters will be noticeably worse than in clusters where all nodes are geographically close together. - -## Automated Repair - -For short-term failures, such as a server restart, CockroachDB uses Raft to continue seamlessly as long as a majority of replicas remain available. 
Raft makes sure that a new “leader” for each group of replicas is elected if the former leader fails, so that transactions can continue and affected replicas can rejoin their group once they’re back online. For longer-term failures, such as a server/rack going down for an extended period of time or a datacenter outage, CockroachDB automatically rebalances replicas from the missing nodes, using the unaffected replicas as sources. Using capacity information from the gossip network, new locations in the cluster are identified and the missing replicas are re-replicated in a distributed fashion using all available nodes and the aggregate disk and network bandwidth of the cluster. diff --git a/src/current/v1.0/import-data.md b/src/current/v1.0/import-data.md deleted file mode 100644 index e994f7e9bb8..00000000000 --- a/src/current/v1.0/import-data.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Import Data -summary: Learn how to import data into a CockroachDB cluster. -toc: true ---- - -CockroachDB supports importing data from `.sql` dumps and some `.csv` files. - -{{site.data.alerts.callout_info}}To import/restore data from CockroachDB-generated enterprise license backups, see RESTORE.{{site.data.alerts.end}} - - -## Import from Generic SQL Dump - -You can execute batches of `INSERT` statements stored in `.sql` files (including those generated by [`cockroach dump`](sql-dump.html)) from the command line, importing data into your cluster. - -~~~ shell -$ cockroach sql --database=[database name] < statements.sql -~~~ - -{{site.data.alerts.callout_success}}Grouping each INSERT statement to include approximately 500 rows will provide the best performance.{{site.data.alerts.end}} - -## Import from PostgreSQL Dump - -If you're importing data from a PostgreSQL deployment, you can import the `.sql` file generated by the `pg_dump` command to more quickly import data. - -{{site.data.alerts.callout_success}}The .sql files generated by pg_dump provide better performance because they use the COPY statement instead of bulk INSERT statements.{{site.data.alerts.end}} - -### Create PostgreSQL SQL File - -Which `pg_dump` command you want to use depends on whether you want to import your entire database or only specific tables: - -- Entire database: - - ~~~ shell - $ pg_dump [database] > [filename].sql - ~~~ - -- Specific tables: - - ~~~ shell - $ pg_dump -t [table] [table's schema] > [filename].sql - ~~~ - -For more details, see PostgreSQL's documentation on [`pg_dump`](https://www.postgresql.org/docs/9.1/static/app-pgdump.html). - -### Reformat SQL File - -After generating the `.sql` file, you need to perform a few editing steps before importing it: - -1. Remove all statements from the file besides the `CREATE TABLE` and `COPY` statements. -2. Manually add the table's [`PRIMARY KEY`](primary-key.html#syntax) constraint to the `CREATE TABLE` statement. - This has to be done manually because PostgreSQL attempts to add the primary key after creating the table, but CockroachDB requires the primary key be defined upon table creation. -3. Review any other [constraints](constraints.html) to ensure they're properly listed on the table. -4. Remove any [unsupported elements](sql-feature-support.html), such as arrays. 
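For example, steps 2 and 3 above usually mean folding a separately emitted primary key back into the `CREATE TABLE` statement. The following is a minimal sketch using a hypothetical `customers` table; the table, column, and constraint names in your dump will differ:

~~~ sql
-- As emitted by pg_dump: the primary key is added after the table is created.
CREATE TABLE customers (
    id INTEGER NOT NULL,
    name TEXT
);
ALTER TABLE ONLY customers
    ADD CONSTRAINT customers_pkey PRIMARY KEY (id);

-- Rewritten for CockroachDB: the primary key is defined with the table.
CREATE TABLE customers (
    id INTEGER NOT NULL,
    name TEXT,
    CONSTRAINT customers_pkey PRIMARY KEY (id)
);
~~~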
- -### Import Data - -After reformatting the file, you can import it through `psql`: - -~~~ shell -$ psql -p [port] -h [node host] -d [database] -U [user] < [file name].sql -~~~ - -For reference, CockroachDB uses these defaults: - -- `[port]`: **26257** -- `[user]`: **root** - -## Import from CSV - -You can import numeric data stored in `.csv` files by executing a bash script that reads values from the files and uses them in `INSERT` statements. - -{{site.data.alerts.callout_danger}}To import non-numerical data, convert the .csv file to a .sql file (you can find free conversion software online), and then import the .sql file.{{site.data.alerts.end}} - -### Template - -This template reads 3 columns of numerical data, and converts them into `INSERT` statements, but you can easily adapt the variables (`a`, `b`, `c`) to any number of columns. - -~~~ sql -> \| IFS=","; while read a b c; do echo "INSERT INTO csv VALUES ($a, $b, $c);"; done < test.csv; -~~~ - -### Example - -In this SQL shell example, use `\!` to look at the rows in a CSV file before creating a table and then using `\|` to insert those rows into the table. - -~~~ sql -> \! cat test.csv -~~~ -~~~ -12, 13, 14 -10, 20, 30 -~~~ -~~~ sql -> CREATE TABLE csv (x INT, y INT, z INT); - -> \| IFS=","; while read a b c; do echo "INSERT INTO csv VALUES ($a, $b, $c);"; done < test.csv; - -> SELECT * FROM csv; -~~~ -~~~ -+----+----+----+ -| x | y | z | -+----+----+----+ -| 12 | 13 | 14 | -| 10 | 20 | 30 | -+----+----+----+ -~~~ - -## See Also - -- [SQL Dump (Export)](sql-dump.html) -- [Back up Data](back-up-data.html) -- [Restore Data](restore-data.html) -- [Use the Built-in SQL Client](use-the-built-in-sql-client.html) -- [Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/improve-the-docs.md b/src/current/v1.0/improve-the-docs.md deleted file mode 100644 index 327ab52a9ec..00000000000 --- a/src/current/v1.0/improve-the-docs.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Improve the Docs -summary: Contribute to the improvement and expansion of CockroachDB documentation. -toc: false ---- - -The CockroachDB docs are open source just like the database itself. We welcome your contributions! - -## Write Docs - -Want to contribute to the docs? - -Find an issue with the [help-wanted](https://github.com/cockroachdb/docs/issues?q=is%3Aopen+is%3Aissue+label%3Ahelp-wanted) label and then review [CONTRIBUTING.md](https://github.com/cockroachdb/docs/blob/master/CONTRIBUTING.md) to set yourself up and get started. You can also select **Contribute > Edit This Page** directly on a page. - -## Suggest Improvements - -See an error? Need additional details or clarification? Want a topic added to the docs? - -Select **Contribute > Report Doc Issue** or **Contribute > Suggest New Content** toward the top of the page, or [open an issue](https://github.com/cockroachdb/docs/issues/new?labels=community) directly. - diff --git a/src/current/v1.0/index.md b/src/current/v1.0/index.md deleted file mode 100755 index 99f1b245e0d..00000000000 --- a/src/current/v1.0/index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: CockroachDB Docs -summary: CockroachDB documentation with details on installation, getting started, building an app, deployment, orchestration, and more. -tags: install, build an app, deploy -type: first_page -homepage: true -toc: false -no_toc: true -twitter: false -contribute: false ---- - -CockroachDB is the SQL database for building global, scalable cloud services that survive disasters. -
- -
- -
-
diff --git a/src/current/v1.0/indexes.md b/src/current/v1.0/indexes.md deleted file mode 100644 index 6195a1e14ca..00000000000 --- a/src/current/v1.0/indexes.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Indexes -summary: Indexes improve your database's performance by helping SQL locate data without having to look through every row of a table. -toc: true -toc_not_nested: true ---- - -Indexes improve your database's performance by helping SQL locate data without having to look through every row of a table. - - -## How Do Indexes Work? - -When you create an index, CockroachDB "indexes" the columns you specify, which creates a copy of the columns and then sorts their values (without sorting the values in the table itself). - -After a column is indexed, SQL can easily filter its values using the index instead of scanning each row one-by-one. On large tables, this greatly reduces the number of rows SQL has to use, executing queries exponentially faster. - -For example, if you index an `INT` column and then filter it WHERE <indexed column> = 10, SQL can use the index to find values starting at 10 but less than 11. In contrast, without an index, SQL would have to evaluate _every_ row in the column for values equaling 10. - -### Creation - -Each table automatically has an index created called `primary`, which indexes either its [primary key](primary-key.html) or—if there is no primary key—a unique value for each row known as `rowid`. We recommend always defining a primary key because the index it creates provides much better performance than letting CockroachDB use `rowid`. - -The `primary` index helps filter a table's primary key but doesn't help SQL find values in any other columns. However, you can improve the performance of queries using columns besides the primary key with secondary indexes. You can create them: - -- At the same time as the table with the `INDEX` clause of [`CREATE TABLE`](create-table.html#create-a-table-with-secondary-indexes). In addition to explicitly defined indexes, CockroachDB automatically creates secondary indexes for columns with the [Unique constraint](unique.html). -- For existing tables with [`CREATE INDEX`](create-index.html). -- By applying the Unique constraint to columns with [`ALTER TABLE`](alter-table.html), which automatically creates an index of the constrained columns. - -To create the most useful secondary indexes, you should also check out our [best practices](#best-practices). - -### Selection - -Because each query can use only a single index, CockroachDB selects the index it calculates will scan the fewest rows (i.e., the fastest). For more detail, check out our blog post [Index Selection in CockroachDB](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/). - -To override CockroachDB's index selection, you can also force [queries to use a specific index](select.html#force-index-selection-index-hints) (also known as "index hinting"). - -### Storage - -CockroachDB stores indexes directly in your key-value store. You can find more information in our blog post [Mapping Table Data to Key-Value Storage](https://www.cockroachlabs.com/blog/sql-in-cockroachdb-mapping-table-data-to-key-value-storage/). - -### Locking - -Tables are not locked during index creation thanks to CockroachDB's [schema change procedure](https://www.cockroachlabs.com/blog/how-online-schema-changes-are-possible-in-cockroachdb/). 
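To make the creation and selection behavior above concrete, here is a minimal sketch; the `users` table, `city` column, and `users_city_idx` index are hypothetical and not part of this page's later examples:

~~~ sql
> CREATE TABLE users (id INT PRIMARY KEY, city STRING, name STRING);

-- Create a secondary index on an existing table:
> CREATE INDEX users_city_idx ON users (city);

-- CockroachDB normally selects the index it expects to scan the fewest rows:
> SELECT name FROM users WHERE city = 'new york';

-- Or force the secondary index explicitly (index hinting):
> SELECT name FROM users@users_city_idx WHERE city = 'new york';
~~~

You can run `EXPLAIN` on either query to confirm which index the statement uses.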
- -### Performance - -Indexes create a trade-off: they greatly improve the speed of queries, but slightly slow down writes (because new values have to be copied and sorted). The first index you create has the largest impact, but additional indexes only introduce marginal overhead. - -To maximize your indexes' performance, we recommend following a few [best practices](#best-practices). - -## Best Practices - -We recommend creating indexes for all of your common queries. To design the most useful indexes, look at each query's `WHERE` and `SELECT` clauses, and create indexes that: - -- [Index all columns](#indexing-columns) in the `WHERE` clause. -- [Store columns](#storing-columns) that are _only_ in the `SELECT` clause. - -### Indexing Columns - -When designing indexes, it's important to consider which columns you index and the order you list them. Here are a few guidelines to help you make the best choices: - -- Each table's [primary key](primary-key.html) (which we recommend always [defining](create-table.html#create-a-table-primary-key-defined)) is automatically indexed. The index it creates (called `primary`) cannot be changed, nor can you change the primary key of a table after it's been created, so this is a critical decision for every table. -- Queries can benefit from an index even if they only filter a prefix of its columns. For example, if you create an index of columns `(A, B, C)`, queries filtering `(A)` or `(A, B)` can still use the index. However, queries that do not filter `(A)` will not benefit from the index.

This feature also lets you avoid using single-column indexes. Instead, use the column as the first column in a multiple-column index, which is useful to more queries. -- Columns filtered in the `WHERE` clause with the equality operators (`=` or `IN`) should come first in the index, before those referenced with inequality operators (`<`, `>`). -- Indexes of the same columns in different orders can produce different results for each query. For more information, see [our blog post on index selection](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/)—specifically the section "Restricting the search space." - -### Storing Columns - -The `STORING` clause specifies columns which are not part of the index key but should be stored in the index. This optimizes queries which retrieve those columns without filtering on them, because it prevents the need to read the primary index. - -### Example - -Say we have a table with three columns, two of which are indexed: - -{% include copy-clipboard.html %} -~~~ sql -> CREATE TABLE tbl (col1 INT, col2 INT, col3 INT, INDEX (col1, col2)); -~~~ - -If we filter on the indexed columns but retrieve the unindexed column, this requires reading `col3` from the primary index via an "index join." - -{% include copy-clipboard.html %} -~~~ sql -> EXPLAIN SELECT col3 FROM tbl WHERE col1 = 10 AND col2 > 1; -~~~ - -~~~ - tree | field | description -+-----------------+-------------+-----------------------+ - render | | - └── index-join | | - │ | table | tbl@primary - │ | key columns | rowid - └── scan | | - | table | tbl@tbl_col1_col2_idx - | spans | /10/2-/11 -~~~ - -However, if we store `col3` in the index, the index join is no longer necessary. This means our query only needs to read from the secondary index, so it will be more efficient. - -{% include copy-clipboard.html %} -~~~ sql -> CREATE TABLE tbl (col1 INT, col2 INT, col3 INT, INDEX (col1, col2) STORING (col3)); -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> EXPLAIN SELECT col3 FROM tbl WHERE col1 = 10 AND col2 > 1; -~~~ - -~~~ - tree | field | description -+-----------+-------------+-------------------+ - render | | - └── scan | | - | table | tbl@tbl_col1_col2_idx - | spans | /10/2-/11 -~~~ - -## See Also - -- [`CREATE INDEX`](create-index.html) -- [`DROP INDEX`](drop-index.html) -- [`RENAME INDEX`](rename-index.html) -- [`SHOW INDEX`](show-index.html) -- [SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/information-schema.md b/src/current/v1.0/information-schema.md deleted file mode 100644 index 2e529a539db..00000000000 --- a/src/current/v1.0/information-schema.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Information Schema -summary: The information_schema database contains read-only views that you can use for introspection into your database's tables, columns, indexes, and views. -toc: true ---- - -CockroachDB represents your cluster's schema in a database called `information_schema`, which contains read-only [views](views.html) that you can use for introspection into your database's tables, columns, indexes, and views. - -This notion of an information schema is part of the SQL standard, which means it is portable and will remain stable. This contrasts with other objects like `pg_catalog`, which is not part of the SQL standard and handles specific implementation issues. - -{{site.data.alerts.callout_info}}The information_schema views typically represent objects that the current user has privilege to access. 
To ensure you can view your cluster's entire schema, access it as the root user.{{site.data.alerts.end}} - - -## Objects - -To perform introspection on objects, you can either read from the related `information_schema` view or use one of CockroachDB's `SHOW` statements. - -Object | Information Schema View| SHOW . --------|--------------|-------- -Columns | [columns](#columns)| [`SHOW COLUMNS`](show-columns.html) -Constraints | [key_column_usage](#key_column_usage), [table_constraints](#table_constraints)| [`SHOW CONSTRAINTS`](show-constraints.html) -Databases | [schemata](#schemata)| [`SHOW DATABASE`](show-vars.html) -Indexes | [statistics](#statistics)| [`SHOW INDEX`](show-index.html) -Privileges | [schema_privileges](#schema_privileges), [table_privileges](#table_privileges)| [`SHOW GRANTS`](show-grants.html) -Tables | [tables](#tables)| [`SHOW TABLES`](show-tables.html) -Views | [tables](#tables), [views](#views)| [`SHOW CREATE VIEW`](show-create-view.html) - -## Views - -The `information_schema` database is comprised of many views representing your cluster's schema, each of which is detailed below. - -### columns - -The `columns` view contains information about the columns in each table. - -Column | Description --------|----------- -`TABLE_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`TABLE_SCHEMA` | Name of the database containing the table. -`TABLE_NAME` | Name of the table. -`COLUMN_NAME` | Name of the column. -`ORDINAL_POSITION` | Ordinal position of the column in the table (begins at 1). -`COLUMN_DEFAULT` | Default Value for the column. -`IS_NULLABLE` | `YES` if the column accepts *NULL* values; `NO` if it doesn't (e.g., it has the [Not Null constraint](not-null.html)). -`DATA_TYPE` | [Data type](data-types.html) of the column. -`CHARACTER_MAXIMUM_LENGTH` | If `DATA_TYPE` is `STRING`, the maximum length in characters of a value; otherwise *NULL*. -`CHARACTER_OCTET_LENGTH` | If `DATA_TYPE` is `STRING`, the maximum length in octets (bytes) of a value; otherwise *NULL*. -`NUMERIC_PRECISION` | If `DATA_TYPE` is numeric, the declared or implicit precision (i.e., number of significant digits); otherwise *NULL*. -`NUMERIC_SCALE` | If `DATA_TYPE` is an exact numeric type, the scale (i.e., number of digits to the right of the decimal point); otherwise *NULL*. -`DATETIME_PRECISION` | Always *NULL* (unsupported by CockroachDB). - -### key_column_usage - -The `key_column_usage` view identifies columns with [Primary Key](primary-key.html), [Unique](unique.html), or [Foreign Key](foreign-key.html) constraints. - -Column | Description --------|----------- -`CONSTRAINT_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`CONSTRAINT_SCHEMA` | Name of the database containing the constraint. -`CONSTRAINT_NAME` | Name of the constraint. -`TABLE_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`TABLE_SCHEMA` | Name of the database that contains the constrained table. -`TABLE_NAME` | Name of the constrained table. -`COLUMN_NAME` | Name of the constrained column. -`ORDINAL_POSITION` | Ordinal position of the column within the constraint (begins at 1). -`POSITION_IN_UNIQUE_CONSTRAINT` | For Foreign Key constraints, ordinal position of the referenced column within its Unique constraint (begins at 1). - -### schema_privileges - -The `schema_privileges` view identifies which [privileges](privileges.html) have been granted to each user at the database level. 
- -Column | Description --------|----------- -`GRANTEE` | Username of user with grant. -`TABLE_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`TABLE_SCHEMA` | Name of the database that contains the constrained table. -`PRIVILEGE_TYPE` | Name of the [privilege](privileges.html). -`IS_GRANTABLE` | Always *NULL* (unsupported by CockroachDB). - -### schemata - -The `schemata` view identifies the cluster's databases. - -Column | Description --------|----------- -`TABLE_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`SCHEMA_NAME` | Name of the database. -`DEFAULT_CHARACTER_SET_NAME` | Always *NULL* (unsupported by CockroachDB). -`SQL_PATH` | Always *NULL* (unsupported by CockroachDB). - -### statistics - -The `statistics` view identifies table's [indexes](indexes.html). - -Column | Description --------|----------- -`TABLE_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`TABLE_SCHEMA` | Name of the database that contains the constrained table. -`TABLE_NAME` | Name of the table . -`NON_UNIQUE` | `false` if the index was created by a Unique constraint; `true` if the index was not created by a Unique constraint. -`INDEX_SCHEMA` | Name of the database that contains the index. -`INDEX_NAME` | Name of the index. -`SEQ_IN_INDEX` | Ordinal position of the column within the index (begins at 1). -`COLUMN_NAME` | Name of the column being indexed. -`COLLATION` | Always *NULL* (unsupported by CockroachDB). -`CARDINALITY` | Always *NULL* (unsupported by CockroachDB). -`DIRECTION` | `ASC` (ascending) or `DESC` (descending) order. -`STORING` | `true` if column is [stored](create-index.html#store-columns); `false` if it's indexed. - -### table_constraints - -The `table_constraints` view identifies [constraints](constraints.html) applied to tables. - -Column | Description --------|----------- -`CONSTRAINT_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`CONSTRAINT_SCHEMA` | Name of the database that contains the constraint. -`CONSTRAINT_NAME` | Name of the constraint. -`TABLE_SCHEMA` | Name of the database that contains the constrained table. -`TABLE_NAME` | Name of the constrained table. -`CONSTRAINT_TYPE` | Type of [constraint](constraints.html): `CHECK`, `FOREIGN KEY`, `PRIMARY KEY`, or `UNIQUE`. - -### table_privileges - -The `table_privileges` view identifies which [privileges](privileges.html) have been granted to each user at the table level. - -Column | Description --------|----------- -`GRANTOR` | Always *NULL* (unsupported by CockroachDB). -`GRANTEE` | Username of user with grant. -`TABLE_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`TABLE_SCHEMA` | Name of the database that the grant applies to. -`TABLE_NAME` | Name of the table that the grant applies to. -`PRIVILEGE_TYPE` | Type of [privilege](privileges.html): `SELECT`, `INSERT`, `UPDATE`, `DELETE`, `TRUNCATE`, `REFERENCES`, or `TRIGGER`. -`IS_GRANTABLE` | Always *NULL* (unsupported by CockroachDB). -`WITH_HIERARCHY` | Always *NULL* (unsupported by CockroachDB). - -### tables - -The `tables` view identifies tables and views in the cluster. - -Column | Description --------|----------- -`TABLE_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`TABLE_SCHEMA` | Name of the database that contains the table. -`TABLE_NAME` | Name of the table. 
-`TABLE_TYPE` | Type of the table: `BASE TABLE` for a normal table, `VIEW` for a view, or `SYSTEM VIEW` for a view created by CockroachDB. -`VERSION` | Version number of the table; versions begin at 1 and are incremented each time an `ALTER TABLE` statement is issued on the table. - -### views - -The `views` view identifies [views](views.html) in the cluster. - -Column | Description --------|----------- -`TABLE_CATALOG` | Always equal to `def` (CockroachDB does not support the notion of catalogs). -`TABLE_SCHEMA` | Name of the database that the view reads from. -`TABLE_NAME` | Name of the table the view reads from. -`VIEW_DEFINITION` | `AS` clause used to [create the view](views.html#creating-views). -`CHECK_OPTION` | Always *NULL* (unsupported by CockroachDB). -`IS_UPDATABLE` | Always *NULL* (unsupported by CockroachDB). -`IS_INSERTABLE_INTO` | Always *NULL* (unsupported by CockroachDB). -`IS_TRIGGER_UPDATABLE` | Always *NULL* (unsupported by CockroachDB). -`IS_TRIGGER_DELETABLE` | Always *NULL* (unsupported by CockroachDB). -`IS_TRIGGER_INSERTABLE_INTO` | Always *NULL* (unsupported by CockroachDB). - -## See Also - -- [`SHOW`](show-vars.html) -- [`SHOW COLUMNS`](show-columns.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) -- [`SHOW CREATE TABLE`](show-create-table.html) -- [`SHOW CREATE VIEW`](show-create-view.html) -- [`SHOW DATABASES`](show-databases.html) -- [`SHOW GRANTS`](show-grants.html) -- [`SHOW INDEX`](show-index.html) -- [`SHOW TABLES`](show-tables.html) diff --git a/src/current/v1.0/insert.md b/src/current/v1.0/insert.md deleted file mode 100644 index bb40f966d91..00000000000 --- a/src/current/v1.0/insert.md +++ /dev/null @@ -1,500 +0,0 @@ ---- -title: INSERT -summary: The INSERT statement inserts one or more rows into a table. -toc: true ---- - -The `INSERT` [statement](sql-statements.html) inserts one or more rows into a table. In cases where inserted values conflict with uniqueness constraints, the `ON CONFLICT` clause can be used to update rather than insert rows. - - -## Required Privileges - -The user must have the `INSERT` [privilege](privileges.html) on the table. To use `ON CONFLICT DO UPDATE`, the user must also have the `UPDATE` privilege on the table. - -## Synopsis - -
{% include {{ page.version.version }}/sql/diagrams/insert.html %}
- -## Parameters - - - -Parameter | Description -----------|------------ -`qualified_name` | The table you want to write data to.| -`AS name` | An alias for the table name. When an alias is provided, it completely hides the actual table name. -`qualified_name_list` | A comma-separated list of column names, in parentheses. -`select_stmt` | A comma-separated list of column values or [value expressions](sql-expressions.html) for a single row, in parentheses. To insert values into multiple rows, use a comma-separated list of parentheses. Alternately, you can use [`SELECT`](select.html) statements to retrieve values from other tables and insert them as new rows. See the [Insert a Single Row](#insert-a-single-row), [Insert Multiple Rows](#insert-multiple-rows), [Insert from a `SELECT` Statement](#insert-from-a-select-statement) examples below.

Each value must match the [data type](data-types.html) of its column. Also, if column names are listed (`qualified_name_list`), values must be in corresponding order; otherwise, they must follow the declared order of the columns in the table. -`DEFAULT VALUES` | To fill all columns with their [default values](default-value.html), use `DEFAULT VALUES` in place of `select_stmt`. To fill a specific column with its default value, leave the value out of the `select_stmt` or use `DEFAULT` at the appropriate position. See the [Insert Default Values](#insert-default-values) examples below. -`on_conflict` | Normally, when inserted values conflict with a Unique constraint on one or more columns, CockroachDB returns an error. To update the affected rows instead, use an `ON CONFLICT` clause containing the column(s) with the unique constraint and the `DO UPDATE SET` expression set to the column(s) to be updated (any `SET` expression supported by the [`UPDATE`](update.html) statement is also supported here). To prevent the affected rows from updating while allowing new rows to be inserted, set `ON CONFLICT` to `DO NOTHING`. See the [Update Values `ON CONFLICT`](#update-values-on-conflict) and [Do Not Update Values `ON CONFLICT`](#do-not-update-values-on-conflict) examples below.

Note that it's not possible to update the same row twice with a single `INSERT ON CONFLICT` statement. Also, if the values in the `SET` expression cause uniqueness conflicts, CockroachDB will return an error.

As a short-hand alternative to the `ON CONFLICT` clause, you can use the [`UPSERT`](upsert.html) statement. However, `UPSERT` does not let you specify the column with the unique constraint; it assumes that the column is the primary key. Using `ON CONFLICT` is therefore more flexible. -`RETURNING target_list` | Return values based on rows inserted, where `target_list` can be specific column names from the table, `*` for all columns, or a computation on specific columns. See the [Insert and Return Values](#insert-and-return-values) example below.

To return nothing in the response, not even the number of rows affected, use `RETURNING NOTHING`.

For `INSERT` statements with `ON CONFLICT` clauses, `RETURNING` is not supported, and `RETURNING NOTHING` is supported only within a [transaction](transactions.html). - -## Examples - -### Insert a Single Row - -~~~ sql -> INSERT INTO accounts (balance, id) VALUES (10000.50, 1); - -> SELECT * FROM accounts; -~~~ -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 10000.5 | -+----+---------+ -~~~ - -If you do not list column names, the statement will use the columns of the table in their declared order: - -~~~ sql -> SHOW COLUMNS FROM accounts; -~~~ -~~~ -+---------+---------+-------+----------------+ -| Field | Type | Null | Default | -+---------+---------+-------+----------------+ -| id | INT | false | unique_rowid() | -| balance | DECIMAL | true | NULL | -+---------+---------+-------+----------------+ -~~~ -~~~ sql -> INSERT INTO accounts VALUES (2, 20000.75); - -> SELECT * FROM accounts; -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.5 | -| 2 | 20000.75 | -+----+----------+ -~~~ - -### Insert Multiple Rows - -~~~ sql -> INSERT INTO accounts (id, balance) VALUES (3, 8100.73), (4, 9400.10); - -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.5 | -| 2 | 20000.75 | -| 3 | 8100.73 | -| 4 | 9400.1 | -+----+----------+ -~~~ - -### Insert from a `SELECT` Statement - -~~~ sql -> SHOW COLUMS FROM other_accounts; -~~~ -~~~ -+--------+---------+-------+---------+ -| Field | Type | Null | Default | -+--------+---------+-------+---------+ -| number | INT | false | NULL | -| amount | DECIMAL | true | NULL | -+--------+---------+-------+---------+ -~~~ -~~~ sql -> INSERT INTO accounts (id, balance) SELECT number, amount FROM other_accounts WHERE id > 4; - -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.5 | -| 2 | 20000.75 | -| 3 | 8100.73 | -| 4 | 9400.1 | -| 5 | 350.1 | -| 6 | 150 | -| 7 | 200.1 | -+----+----------+ -~~~ - -### Insert Default Values - -~~~ sql -> INSERT INTO accounts (id) VALUES (8); -> INSERT INTO accounts (id, balance) VALUES (9, DEFAULT); - -> SELECT * FROM accounts WHERE id in (8, 9); -~~~ -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 8 | NULL | -| 9 | NULL | -+----+---------+ -~~~ -~~~ sql -> INSERT INTO accounts DEFAULT VALUES; - -> SELECT * FROM accounts; -~~~ -~~~ -+--------------------+----------+ -| id | balance | -+--------------------+----------+ -| 1 | 10000.5 | -| 2 | 20000.75 | -| 3 | 8100.73 | -| 4 | 9400.1 | -| 5 | 350.1 | -| 6 | 150 | -| 7 | 200.1 | -| 8 | NULL | -| 9 | NULL | -| 142933248649822209 | NULL | -+--------------------+----------+ -~~~ - -### Insert and Return Values - -In this example, the `RETURNING` clause returns the `id` values of the rows inserted, which are generated server-side by the `unique_rowid()` function. The language-specific versions assume that you have installed the relevant [client drivers](install-client-drivers.html). - -{{site.data.alerts.callout_success}}This use of RETURNING mirrors the behavior of MySQL's last_insert_id() function.{{site.data.alerts.end}} - -{{site.data.alerts.callout_info}}When a driver provides a query() method for statements that return results and an exec() method for statements that do not (e.g., Go), it's likely necessary to use the query() method for INSERT statements with RETURNING.{{site.data.alerts.end}} - -
- - - - - -
- -
-

-~~~ sql -> INSERT INTO accounts (id, balance) - VALUES (DEFAULT, 1000), (DEFAULT, 250) - RETURNING id; -~~~ - -~~~ -+--------------------+ -| id | -+--------------------+ -| 190018410823680001 | -| 190018410823712769 | -+--------------------+ -(2 rows) -~~~ - -
- -
-

- -~~~ python -# Import the driver. -import psycopg2 - -# Connect to the "bank" database. -conn = psycopg2.connect( - database='bank', - user='root', - host='localhost', - port=26257 -) - -# Make each statement commit immediately. -conn.set_session(autocommit=True) - -# Open a cursor to perform database operations. -cur = conn.cursor() - -# Insert two rows into the "accounts" table -# and return the "id" values generated server-side. -cur.execute( - 'INSERT INTO accounts (id, balance) ' - 'VALUES (DEFAULT, 1000), (DEFAULT, 250) ' - 'RETURNING id' -) - -# Print out the returned values. -rows = cur.fetchall() -print('IDs:') -for row in rows: - print([str(cell) for cell in row]) - -# Close the database connection. -cur.close() -conn.close() -~~~ - -The printed values would look like: - -~~~ -IDs: -['190019066706952193'] -['190019066706984961'] -~~~ - -
- -
-

- -~~~ ruby -# Import the driver. -require 'pg' - -# Connect to the "bank" database. -conn = PG.connect( - user: 'root', - dbname: 'bank', - host: 'localhost', - port: 26257 -) - -# Insert two rows into the "accounts" table -# and return the "id" values generated server-side. -conn.exec( - 'INSERT INTO accounts (id, balance) '\ - 'VALUES (DEFAULT, 1000), (DEFAULT, 250) '\ - 'RETURNING id' -) do |res| - -# Print out the returned values. -puts "IDs:" - res.each do |row| - puts row - end -end - -# Close communication with the database. -conn.close() -~~~ - -The printed values would look like: - -~~~ -IDs: -{"id"=>"190019066706952193"} -{"id"=>"190019066706984961"} -~~~ - -
-
-

- -~~~ go -package main - -import ( - "database/sql" - "fmt" - "log" - - _ "github.com/lib/pq" -) - -func main() { - // Connect to the "bank" database. - db, err := sql.Open( - "postgres", - "postgresql://root@localhost:26257/bank?sslmode=disable", - ) - if err != nil { - log.Fatal("error connecting to the database: ", err) - } - - // Insert two rows into the "accounts" table - // and return the "id" values generated server-side. - rows, err := db.Query( - "INSERT INTO accounts (id, balance) " + - "VALUES (DEFAULT, 1000), (DEFAULT, 250) " + - "RETURNING id", - ) - if err != nil { - log.Fatal(err) - } - - // Print out the returned values. - defer rows.Close() - fmt.Println("IDs:") - for rows.Next() { - var id int - if err := rows.Scan(&id); err != nil { - log.Fatal(err) - } - fmt.Printf("%d\n", id) - } -} -~~~ - -The printed values would look like: - -~~~ -IDs: -190019066706952193 -190019066706984961 -~~~ - -
- -
-

- -~~~ js -var async = require('async'); - -// Require the driver. -var pg = require('pg'); - -// Connect to the "bank" database. -var config = { - user: 'root', - host: 'localhost', - database: 'bank', - port: 26257 -}; - -pg.connect(config, function (err, client, done) { - // Closes communication with the database and exits. - var finish = function () { - done(); - process.exit(); - }; - - if (err) { - console.error('could not connect to cockroachdb', err); - finish(); - } - async.waterfall([ - function (next) { - // Insert two rows into the "accounts" table - // and return the "id" values generated server-side. - client.query( - `INSERT INTO accounts (id, balance) - VALUES (DEFAULT, 1000), (DEFAULT, 250) - RETURNING id;`, - next - ); - } - ], - function (err, results) { - if (err) { - console.error('error inserting into and selecting from accounts', err); - finish(); - } - // Print out the returned values. - console.log('IDs:'); - results.rows.forEach(function (row) { - console.log(row); - }); - - finish(); - }); -}); -~~~ - -The printed values would look like: - -~~~ -IDs: -{ id: '190019066706952193' } -{ id: '190019066706984961' } -~~~ - -
- -### Update Values `ON CONFLICT` - -When a uniqueness conflict is detected, CockroachDB stores the row in a temporary table called excluded. This example demonstrates how you use the columns in the temporary excluded table to apply updates on conflict: - -~~~ sql -> INSERT INTO accounts (id, balance) - VALUES (8, 500.50) - ON CONFLICT (id) - DO UPDATE SET balance = excluded.balance; - -> SELECT * FROM accounts WHERE id = 8; -~~~ -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 8 | 500.5 | -+----+---------+ -~~~ - -### Do Not Update Values `ON CONFLICT` - -In this example, we get an error from a uniqueness conflict: - -~~~ sql -> SELECT * FROM accounts WHERE id = 8; -~~~ -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 8 | 500.5 | -+----+---------+ -~~~ -~~~ sql -> INSERT INTO accounts (id, balance) VALUES (8, 125.50); -~~~ -~~~ -pq: duplicate key value (id)=(8) violates unique constraint "primary" -~~~ - -In this example, we use `ON CONFLICT DO NOTHING` to ignore the uniqueness error and prevent the affected row from being updated: - -~~~ sql -> INSERT INTO accounts (id, balance) - VALUES (8, 125.50) - ON CONFLICT (id) - DO NOTHING; - -> SELECT * FROM accounts WHERE id = 8; -~~~ -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 8 | 500.5 | -+----+---------+ -~~~ - -In this example, `ON CONFLICT DO NOTHING` prevents the first row from updating while allowing the second row to be inserted: - -~~~ sql -> INSERT INTO accounts (id, balance) - VALUES (8, 125.50), (10, 450) - ON CONFLICT (id) - DO NOTHING; - -> SELECT * FROM accounts WHERE id in (8, 10); -~~~ -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 8 | 500.5 | -| 10 | 450 | -+----+---------+ -~~~ - -## See Also - -- [`UPSERT`](upsert.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/install-client-drivers.md b/src/current/v1.0/install-client-drivers.md deleted file mode 100644 index f8f1f7f3190..00000000000 --- a/src/current/v1.0/install-client-drivers.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Install Client Drivers -summary: CockroachDB supports the PostgreSQL wire protocol, so you can use any available PostgreSQL client drivers. -toc: false ---- - -CockroachDB supports the PostgreSQL wire protocol, so you can use any available PostgreSQL client drivers. We’ve tested and can recommend the following drivers. - -For code samples using these drivers, see the [Build an App with CockroachDB](build-an-app-with-cockroachdb.html) tutorials. 
- -Language | Recommended Driver ----------|-------- -Go | [pq](https://godoc.org/github.com/lib/pq) -Python | [psycopg2](http://initd.org/psycopg/) -Ruby | [pg](https://rubygems.org/gems/pg) -Java | [jdbc](https://jdbc.postgresql.org) -Node.js | [pg](https://www.npmjs.com/package/pg) -C | [libpq](http://www.postgresql.org/docs/9.5/static/libpq.html) -C++ | [libpqxx](https://github.com/jtv/libpqxx) -Clojure | [java.jdbc](https://clojure-doc.org/articles/ecosystem/java_jdbc/home/) -PHP | [php-pgsql](https://www.php.net/manual/en/book.pgsql.php) -Rust | postgres {% comment %} This link is in HTML instead of Markdown because HTML proofer dies bc of https://github.com/rust-lang/crates.io/issues/163 {% endcomment %} diff --git a/src/current/v1.0/install-cockroachdb.html b/src/current/v1.0/install-cockroachdb.html deleted file mode 100644 index 1fbb77cf880..00000000000 --- a/src/current/v1.0/install-cockroachdb.html +++ /dev/null @@ -1,508 +0,0 @@ ---- -title: Install CockroachDB -summary: Install CockroachDB on Mac, Linux, or Windows. Sign up for product release notes. -tags: download, binary, homebrew -toc: false -allowed_hashes: [os-mac, os-linux, os-windows] ---- - - - -
- - - -
- -
-

There are four ways to install CockroachDB on macOS. See Release Notes for what's new in the latest release, {{ page.release_info.version }}.

- - - -
-

Download the Binary

-
    -
  1. -

    Download the CockroachDB archive for OS X, and extract the binary:

    - -
    - icon/buttons/copy - -
    -
    $ curl https://binaries.cockroachdb.com/cockroach-{{page.release_info.version}}.darwin-10.9-amd64.tgz \
    -| tar -xz
    - - {{site.data.alerts.callout_info}}You can also download other versions of the binary listed on our Releases page.{{site.data.alerts.end}} -
  2. -
  3. -

    Copy the binary into your PATH so it's easy to execute cockroach commands from any shell:

    - - {% include copy-clipboard.html %}
    cp -i cockroach-{{ page.release_info.version }}.darwin-10.9-amd64/cockroach /usr/local/bin/
    -

    If you get a permissions error, prefix the command with sudo.

    -
  4. -
  5. -

    Get future release notes emailed to you:

    -{% include marketo-install.html uid="1" %} -
  6. -
-

What's Next?

-

Quick start a single- or multi-node cluster locally and talk to it via the built-in SQL client.

- -{% include {{ page.version.version }}/misc/diagnostics-callout.html %} - -
C++ compiler | Must support C++ 11. GCC prior to 6.0 does not work due to this issue. On macOS, Xcode should suffice.
Go | Version 1.8.1 is required.
Bash | Versions 4+ are preferred, but later releases from the 3.x series are also known to work.
CMake | Versions 3.8+ are known to work.
Autoconf | Version 2.68 or higher is required.
-

A 64-bit system is strongly recommended. Building or running CockroachDB on 32-bit systems has not been tested. You'll also need at least 2GB of RAM. If you plan to run our test suite, you'll need closer to 4GB of RAM.

- -
  • -

    Download the CockroachDB {{ page.release_info.version }} source archive, and extract the sources:

    - -
    - icon/buttons/copy - -
    -
    $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.src.tgz \
    -| tar -xz
    - - {{site.data.alerts.callout_info}}You can also download other versions of the binary listed on our Releases page.{{site.data.alerts.end}} -
  • -
  • In the extracted directory, run make build:

    - - {% include copy-clipboard.html %}
    cd cockroach-{{ page.release_info.version }}
    - - {% include copy-clipboard.html %}
    make build
    - -

    The build process can take 10+ minutes, so please be patient.

    - - {{site.data.alerts.callout_info}}The default binary contains core open-source functionality covered by the Apache License 2 (APL2) and enterprise functionality covered by the CockroachDB Community License (CCL). To build a pure open-source (APL2) version excluding enterprise functionality, use make buildoss. See this blog post for more details.{{site.data.alerts.end}} -
  • -
  • -

    Install the cockroach binary into /usr/local/bin/ so it's easy to execute cockroach commands from any directory:

    - - {% include copy-clipboard.html %}
    make install
    -

    If you get a permissions error, prefix the command with sudo.

    - -

    You can also execute the cockroach binary directly from its built location, ./src/github.com/cockroachdb/cockroach/cockroach, but the rest of the documentation assumes you have the binary on your PATH.

    -
  • -
  • -

    Get future release notes emailed to you:

    -{% include marketo-install.html uid="2" %} -
  • - -

    What's Next?

    -

    Quick start a single- or multi-node cluster locally and talk to it via the built-in SQL client.

    - -{% include {{ page.version.version }}/misc/diagnostics-callout.html %} - - - - - - - - - diff --git a/src/current/v1.0/int.md b/src/current/v1.0/int.md deleted file mode 100644 index cdfbbd96897..00000000000 --- a/src/current/v1.0/int.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: INT -summary: The INT data type stores 64-bit signed integers, that is, whole numbers from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. -toc: true ---- - -The `INT` [data type](data-types.html) stores 64-bit signed integers, that is, whole numbers from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. - -{{site.data.alerts.callout_info}}To auto-generate globally unique integers, use the SERIAL data type.{{site.data.alerts.end}} - - -## Aliases - -In CockroachDB, the following are aliases for `INT`: - -- `SMALLINT` -- `INTEGER` -- `INT8` -- `INT64` -- `BIGINT` - -## Syntax - -A constant value of type `INT` can be entered as a [numeric literal](sql-constants.html#numeric-literals). -For example: `42`, `-1234`, or `0xCAFE`. - -## Size - -An `INT` column supports values up to 64 bits (8 bytes) in width, but the total storage size is likely to be larger due to CockroachDB metadata. - -CockroachDB does not offer multiple integer types for different widths; instead, our compression ensures that smaller integers use less disk space than larger integers. However, you can use the `BIT(n)` type, with `n` from 1 to 64, to constrain integers based on their corresponding binary values. For example, `BIT(5)` would allow `31` because it corresponds to the five-digit binary integer `11111`, but would not allow `32` because it corresponds to the six-digit binary integer `100000`, which is 1 bit too long. See the [example](#examples) below for a demonstration. - -{{site.data.alerts.callout_info}}BIT values are input and displayed in decimal format by default like all other integers, not in binary format. Also note that BIT is equivalent to BIT(1).{{site.data.alerts.end}} - -## Examples - -~~~ sql -> CREATE TABLE ints (a INT PRIMARY KEY, b SMALLINT, c BIT(5)); -~~~ - -~~~ -CREATE TABLE -~~~ - -~~~ sql -> SHOW COLUMNS FROM ints; -~~~ - -~~~ -+-------+--------+-------+---------+-----------+ -| Field | Type | Null | Default | Indices | -+-------+--------+-------+---------+-----------+ -| a | INT | false | NULL | {primary} | -| b | INT | true | NULL | {} | -| c | BIT(5) | true | NULL | {} | -+-------+--------+-------+---------+-----------+ -(3 rows) -~~~ - -~~~ sql -> INSERT INTO ints VALUES (1, 32, 32); -~~~ - -~~~ -pq: bit string too long for type BIT(5) (column "c") -~~~ - -~~~ sql -> INSERT INTO ints VALUES (1, 32, 31); -~~~ - -~~~ -INSERT 1 -~~~ - -~~~ sql -> SELECT * FROM ints; -~~~ - -~~~ -+---+----+----+ -| a | b | c | -+---+----+----+ -| 1 | 32 | 31 | -+---+----+----+ -(1 row) -~~~ - -## Supported Casting & Conversion - -`INT` values can be [cast](data-types.html#data-type-conversions-casts) to any of the following data types: - -Type | Details ------|-------- -`DECIMAL` | –– -`FLOAT` | Loses precision if the `INT` value is larger than 2^53 in magnitude -`BOOL` | **0** converts to `false`; all other values convert to `true` -`DATE` | Converts to days since the Unix epoch (Jan. 1, 1970) -`TIMESTAMP` | Converts to seconds since the Unix epoch (Jan. 1, 1970). This is a CockroachDB experimental feature which may be changed without notice. 
-`INTERVAL` | Converts to microseconds -`STRING` | –– - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/interleave-in-parent.md b/src/current/v1.0/interleave-in-parent.md deleted file mode 100644 index 3b72d2f8109..00000000000 --- a/src/current/v1.0/interleave-in-parent.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: INTERLEAVE IN PARENT -summary: Interleaving tables improves query performance by optimizing the key-value structure of closely related table's data. -toc: true -toc_not_nested: true ---- - -Interleaving tables improves query performance by optimizing the key-value structure of closely related tables, attempting to keep data on the same [key-value range](frequently-asked-questions.html#how-does-cockroachdb-scale) if it's likely to be read and written together. - -{{site.data.alerts.callout_info}}Interleaving tables does not affect their behavior within SQL.{{site.data.alerts.end}} - - -## How Interleaved Tables Work - -When tables are interleaved, data written to one table (known as the **child**) is inserted directly into another (known as the **parent**) in the key-value store. This is accomplished by matching the child table's Primary Key to the parent's. - -### Interleave Prefix - -For interleaved tables to have Primary Keys that can be matched, the child table must use the parent table's entire Primary Key as a prefix of its own Primary Key––these matching columns are referred to as the **interleave prefix**. It's easiest to think of these columns as representing the same data, which is usually implemented with Foreign Keys. - -{{site.data.alerts.callout_success}}To formally enforce the relationship between each table's interleave prefix columns, we recommend using Foreign Key constraints.{{site.data.alerts.end}} - -For example, if you want to interleave `orders` into `customers` and the Primary Key of customers is `id`, you need to create a column representing `customers.id` as the first column in the Primary Key of `orders`—e.g., with a column called `customer`. So the data representing `customers.id` is the interleave prefix, which exists in the `orders` table as the `customer` column. - -### Key-Value Structure - -When you write data into the child table, it is inserted into the key-value store immediately after the parent table's key matching the interleave prefix. - -For example, if you interleave `orders` into `customers`, the `orders` data is written directly within the `customers` table in the key-value store. The following is a crude, illustrative example of what the keys would look like in this structure: - -~~~ -/customers/1 -/customers/1/orders/1000 -/customers/1/orders/1002 -/customers/2 -/customers/2/orders/1001 -/customers/2/orders/1003 -... -/customers/n/ -/customers/n/orders/ -~~~ - -By writing data in this way, related data is more likely to remain on the same key-value range, which can make it much faster to read from and write to. Using the above example, all of customer 1's data is going to be written to the same range, including its representation in both the `customers` and `orders` tables. - -## When to Interleave Tables - -{% include {{ page.version.version }}/faq/when-to-interleave-tables.html %} - -### Interleaved Hierarchy - -Interleaved tables typically work best when the tables form a hierarchy. For example, you could interleave the table `orders` (as the child) into the table `customers` (as the parent, which represents the people who placed the orders). 
You can extend this example by also interleaving the tables `invoices` (as a child) and `packages` (as a child) into `orders` (as the parent). - -The entire set of these relationships is referred to as the **interleaved hierarchy**, which contains all of the tables related through [interleave prefixes](#interleave-prefix). - -### Benefits - -In general, reads, writes, and joins of values related through the interleave prefix are *much* faster. However, you can also improve performance with any of the following: - -- Filtering more columns in the interleave prefix (from left to right).

    For example, if the interleave prefix of `packages` is `(customer, order)`, filtering on `customer` would be fast, but filtering on `customer` *and* `order` would be faster. -- Using only tables in the interleaved hierarchy. - -### Tradeoffs - -- In general, reads and deletes over ranges of table values (e.g., `WHERE column > value`) in interleaved tables are slower.

    However, there is an exception: operations on ranges of table values remain fast when they are performed on the greatest descendant in the interleaved hierarchy and filter on all columns of the interleave prefix with constant values.

    For example, if the interleave prefix of `packages` is `(customer, order)`, filtering on the entire interleave prefix with constant values while scanning a range of values in another column, like `WHERE customer = 1 AND order = 1001 AND delivery_date > DATE '2016-01-25'`, would still be fast. -- If the amount of interleaved data stored for any Primary Key value of the root table is larger than [a key-value range's maximum size](configure-replication-zones.html#replication-zone-format) (64MB by default), the interleaved optimizations will be diminished.

    For example, if one customer has 200MB of order data, their data is likely to be spread across multiple key-value ranges and CockroachDB will not be able to access it as quickly, despite it being interleaved. - -## Syntax - -{% include {{ page.version.version }}/sql/diagrams/interleave.html %} - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `CREATE TABLE ...` | For help with this section of the syntax, [`CREATE TABLE`](create-table.html). -| `parent_table` | The name of the parent table you want to interleave the new child table into. | -| `interleave_prefix` | A comma-separated list of columns from the child table's Primary Key that represent the parent table's Primary Key (i.e., the interleave prefix). | - -## Requirements - -- You can only interleave tables when creating the child table. -- Each child table's Primary Key must contain its parent table's Primary Key as a prefix (known as the **interleave prefix**).

    For example, if the parent table's primary key is `(a INT, b STRING)`, the child table's primary key could be `(a INT, b STRING, c DECIMAL)`. - {{site.data.alerts.callout_info}}This requirement is enforced only by ensuring that the columns use the same data types. However, we recommend ensuring the columns refer to the same values by using the Foreign Key constraint.{{site.data.alerts.end}} -- Interleaved tables cannot be the child of more than 1 parent table. However, each parent table can have many children tables. Children tables can also be parents of interleaved tables. - -## Recommendations - -- Use interleaved tables when your schema forms a hierarchy, and the Primary Key of the root table (for example, a "user ID" or "account ID") is a parameter to most of your queries. -- To enforce the relationship between the parent and children table's Primary Keys, use [Foreign Key constraints](foreign-key.html) on the child table. -- In cases where you're uncertain if interleaving tables will improve your queries' performance, test how tables perform under load when they're interleaved and when they aren't. - -## Examples - -### Interleaving Tables - -This example creates an interleaved hierarchy between `customers`, `orders`, and `packages`, as well as the appropriate Foreign Key constraints. You can see that each child table uses its parent table's Primary Key as a prefix of its own Primary Key (the **interleave prefix**). - -~~~ sql -> CREATE TABLE customers ( - id INT PRIMARY KEY, - name STRING(50) - ); - -> CREATE TABLE orders ( - customer INT, - id INT, - total DECIMAL(20, 5), - PRIMARY KEY (customer, id), - CONSTRAINT fk_customer FOREIGN KEY (customer) REFERENCES customers - ) INTERLEAVE IN PARENT customers (customer) - ; - -> CREATE TABLE packages ( - customer INT, - "order" INT, - id INT, - address STRING(50), - delivered BOOL, - delivery_date DATE, - PRIMARY KEY (customer, "order", id), - CONSTRAINT fk_order FOREIGN KEY (customer, "order") REFERENCES orders - ) INTERLEAVE IN PARENT orders (customer, "order") - ; -~~~ - -### Key-Value Storage Example - -It can be easier to understand what interleaving tables does by seeing what it looks like in the key-value store. For example, using the above example of interleaving `orders` in `customers`, we could insert the following values: - -~~~ sql -> INSERT INTO customers - (id, name) VALUES - (1, 'Ha-Yun'), - (2, 'Emanuela'); - -> INSERT INTO orders - (customer, id, total) VALUES - (1, 1000, 100.00), - (2, 1001, 90.00), - (1, 1002, 80.00), - (2, 1003, 70.00); -~~~ - -Using an illustrative format of the key-value store (keys are on the left; values are represented by `-> value`), the data would be written like this: - -~~~ -/customers/ -> 'Ha-Yun' -/customers//orders/ -> 100.00 -/customers//orders/ -> 80.00 -/customers/ -> 'Emanuela' -/customers//orders/ -> 90.00 -/customers//orders/ -> 70.00 -~~~ - -You'll notice that `customers.id` and `orders.customer` are written into the same position in the key-value store. This is how CockroachDB relates the two table's data for the interleaved structure. By storing data this way, accessing any of the `orders` data alongside the `customers` is much faster. 
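As a quick illustration (an added example, not from the original page), a query that touches only one customer's rows in both tables benefits directly from this layout, since everything it needs sits in the same part of the keyspace:

~~~ sql
> SELECT c.name, o.id, o.total
  FROM customers AS c
  JOIN orders AS o ON o.customer = c.id
  WHERE c.id = 1;
~~~

Because the filter pins down the interleave prefix (`customer`), all of the `customers` and `orders` rows this query reads are stored next to each other under `/customers/1`.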
- -{{site.data.alerts.callout_info}}If we didn't set Foreign Key constraints between customers.id and orders.customer and inserted orders.customer = 3, the data would still get written into the key-value in the expected location next to the customers table identifier, but SELECT * FROM customers WHERE id = 3 would not return any values.{{site.data.alerts.end}} - -To better understand how CockroachDB writes key-value data, see our blog post [Mapping Table Data to Key-Value Storage](https://www.cockroachlabs.com/blog/sql-in-cockroachdb-mapping-table-data-to-key-value-storage/). - -## See Also - -- [`CREATE TABLE`](create-table.html) -- [Foreign Keys](foreign-key.html) -- [Column Families](column-families.html) - diff --git a/src/current/v1.0/internal/version-switcher-page-data.json b/src/current/v1.0/internal/version-switcher-page-data.json deleted file mode 100644 index 5ec30bf893f..00000000000 --- a/src/current/v1.0/internal/version-switcher-page-data.json +++ /dev/null @@ -1,17 +0,0 @@ ---- -layout: none ---- - -{%- capture page_folder -%}/{{ page.version.version }}/{%- endcapture -%} -{%- assign pages = site.pages | where_exp: "pages", "pages.url contains page_folder" | where_exp: "pages", "pages.name != '404.md'" -%} -{ -{%- for x in pages -%} -{%- assign key = x.url | replace: page_folder, "" -%} -{%- if x.key -%} - {%- assign key = x.key -%} -{%- endif %} - {{ key | jsonify }}: { - "url": {{ x.url | jsonify }} - }{% unless forloop.last %},{% endunless -%} -{% endfor %} -} \ No newline at end of file diff --git a/src/current/v1.0/interval.md b/src/current/v1.0/interval.md deleted file mode 100644 index 79b31d167ae..00000000000 --- a/src/current/v1.0/interval.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: INTERVAL -summary: The INTERVAL data type stores a value that represents a span of time. -toc: true ---- - -The `INTERVAL` [data type](data-types.html) stores a value that represents a span of time. - - -## Syntax - -A constant value of type `INTERVAL` can be expressed using an -[interpreted literal](sql-constants.html#interpreted-literals), or a -string literal -[annotated with](sql-expressions.html#explicitly-typed-expressions) -type `INTERVAL` or -[coerced to](sql-expressions.html#explicit-type-coercions) type -`INTERVAL`. - -`INTERVAL` constants can be expressed using the following formats: - -Format | Description --------|-------- -SQL Standard | `INTERVAL 'Y-M D H:M:S'`

    `Y-M D`: Using a single value defines days only; using two values defines years and months. Values must be integers.

    `H:M:S`: Using a single value defines seconds only; using two values defines hours and minutes. Values can be integers or floats.

    Note that each side is optional. -ISO 8601 | `INTERVAL 'P1Y2M3DT4H5M6S'` -Traditional PostgreSQL | `INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds'` -Golang | `INTERVAL '1h2m3s4ms5us6ns'`

    Note that `ms` is milliseconds, `us` is microseconds, and `ns` is nanoseconds. Also, all fields support both integers and floats. - -CockroachDB also supports using uninterpreted -[string literals](sql-constants.html#string-literals) in contexts -where a `INTERVAL` value is otherwise expected. - -Intervals are stored internally as months, days, and nanoseconds. - -## Size - -An `INTERVAL` column supports values up to 24 bytes in width, but the total storage size is likely to be larger due to CockroachDB metadata. - -## Example - -~~~ sql -> CREATE TABLE intervals (a INT PRIMARY KEY, b INTERVAL); -~~~ - -~~~ -CREATE TABLE -~~~ - -~~~ sql -> SHOW COLUMNS FROM intervals; -~~~ - -~~~ -+-------+----------+-------+---------+ -| Field | Type | Null | Default | -+-------+----------+-------+---------+ -| a | INT | false | NULL | -| b | INTERVAL | true | NULL | -+-------+----------+-------+---------+ -~~~ - -~~~ sql -> INSERT INTO intervals VALUES - (1, INTERVAL '1h2m3s4ms5us6ns'), - (2, INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds'), - (3, INTERVAL '1-2 3 4:5:6'); -~~~ - -~~~ -INSERT 3 -~~~ - -~~~ sql -> SELECT * FROM intervals; -~~~ - -~~~ -+---+------------------+ -| a | b | -+---+------------------+ -| 1 | 1h2m3.004005006s | -| 2 | 14m3d4h5m6s | -| 3 | 14m3d4h5m6s | -+---+------------------+ -(3 rows) -~~~ - -## Supported Casting & Conversion - -`INTERVAL` values can be [cast](data-types.html#data-type-conversions-casts) to any of the following data types: - -Type | Details ------|-------- -`INT` | Converts to number of seconds (second precision) -`DECIMAL` | Converts to number of seconds (nanosecond precision) -`FLOAT` | Converts to number of picoseconds -`STRING` | Converts to `h-m-s` format (nanosecond precision) - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/keywords-and-identifiers.md b/src/current/v1.0/keywords-and-identifiers.md deleted file mode 100644 index bddda59e818..00000000000 --- a/src/current/v1.0/keywords-and-identifiers.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Keywords & Identifiers -toc: false ---- - -SQL statements consist of two fundamental components: - -- [__Keywords__](#keywords): Words with specific meaning in SQL like `CREATE`, `INDEX`, and `BOOL` -- [__Identifiers__](#identifiers): Names for things like databases and some functions - -## Keywords - -Keywords make up SQL's vocabulary and can have specific meaning in statements. Each SQL keyword that CockroachDB supports is on one of four lists: - -- [Reserved Keywords](sql-grammar.html#reserved_keyword) -- [Type Function Name Keywords](sql-grammar.html#type_func_name_keyword) -- [Column Name Keywords](sql-grammar.html#col_name_keyword) -- [Unreserved Keywords](sql-grammar.html#unreserved_keyword) - -Reserved keywords have fixed meanings and are not typically allowed as identifiers. All other types of keywords are considered non-reserved; they have special meanings in certain contexts and can be used as identifiers in other contexts. - -### Keyword Uses -Most users asking about keywords want to know more about them in terms of: - -- __Names of objects__, covered on this page in [Identifiers](#identifiers) -- __Syntax__, covered in our pages [SQL Statements](sql-statements.html) and [SQL Grammar](sql-grammar.html) - -## Identifiers - -Identifiers are most commonly used as names of objects like databases, tables, or columns—because of this, the terms "name" and "identifier" are often used interchangeably. 
However, identifiers also have less-common uses, such as changing column labels with `SELECT`. - -### Rules for Identifiers - -In our [SQL grammar](sql-grammar.html), all values that accept an `identifier` must: - -- Begin with a Unicode letter or an underscore (_). Subsequent characters can be letters, underscores, digits (0-9), or dollar signs ($). -- Not equal any [SQL keyword](#keywords) unless the keyword is accepted by the element's syntax. For example, [`name`](sql-grammar.html#name) accepts Unreserved or Column Name keywords. - -To bypass either of these rules, simply surround the identifier with double-quotes ("). However, all references to it must also include double-quotes. - -{{site.data.alerts.callout_info}}Some statements have additional requirements for identifiers. For example, each table in a database must have a unique name. These requirements are documented on each statement's page.{{site.data.alerts.end}} - -## See Also - -- [SQL Statements](sql-statements.html) -- [Full SQL Grammar](sql-grammar.html) diff --git a/src/current/v1.0/known-limitations.md b/src/current/v1.0/known-limitations.md deleted file mode 100644 index 1778b54d5bc..00000000000 --- a/src/current/v1.0/known-limitations.md +++ /dev/null @@ -1,483 +0,0 @@ ---- -title: Known Limitations in CockroachDB v1.0 -summary: Known limitations is CockroachDB v1.0. -toc: true ---- - -This page describes limitations we identified in the [CockroachDB v1.0](../releases/v1.0.html) release. For limitations that have been subsequently resolved, this page also calls out the release incuding the change. - - -## Removing all rows from large tables - -{{site.data.alerts.callout_info}}Resolved as of v1.1. See #17016.{{site.data.alerts.end}} - -When removing all rows from a table via a [`TRUNCATE`](truncate.html) statement or a [`DELETE`](delete.html#delete-all-rows) statement without a `WHERE` clause, CockroachDB batches the entire operation as a single [transaction](transactions.html). For large tables, this can cause the nodes containing the table data to either crash or exhibit poor performance due to elevated memory and CPU usage. - -As a workaround, when you need to remove all rows from a large table: - -1. Use [`SHOW CREATE TABLE`](show-create-table.html) to get the table schema. -2. Use [`DROP TABLE`](drop-table.html) to remove the table. -3. Use [`CREATE TABLE`](create-table.html) with the output from step 1 to recreate the table. - -## Schema changes within transactions - -Within a single [transaction](transactions.html): - -- DDL statements cannot be mixed with DML statements. As a workaround, you can split the statements into separate transactions. -- A [`CREATE TABLE`](create-table.html) statement containing [`FOREIGN KEY`](foreign-key.html) or [`INTERLEAVE`](interleave-in-parent.html) clauses cannot be followed by statements that reference the new table. This also applies to running [`TRUNCATE`](truncate.html) on such a table because `TRUNCATE` implicitly drops and recreates the table. -- A table cannot be dropped and then recreated with the same name. This is not possible within a single transaction because `DROP TABLE` does not immediately drop the name of the table. As a workaround, split the [`DROP TABLE`](drop-table.html) and [`CREATE TABLE`](create-table.html) statements into separate transactions. 
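As a minimal sketch of that last workaround (using a hypothetical `users` table), run the two statements as separate transactions instead of inside a single `BEGIN ... COMMIT` block:

~~~ sql
> BEGIN;
> DROP TABLE users;
> CREATE TABLE users (id INT PRIMARY KEY); -- not allowed: the old name is not yet released
> ROLLBACK;

> DROP TABLE users;                        -- its own (implicit) transaction
> CREATE TABLE users (id INT PRIMARY KEY); -- a second, separate transaction
~~~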
- -## Schema changes between executions of prepared statements - -When the schema of a table targeted by a prepared statement changes after the prepared statement is created, future executions of the prepared statement could result in an error. For example, adding a column to a table referenced in a prepared statement with a `SELECT *` clause will result in an error: - -{% include_cached copy-clipboard.html %} -~~~ sql -CREATE TABLE users (id INT PRIMARY KEY); -~~~ - -{% include_cached copy-clipboard.html %} -~~~ sql -PREPARE prep1 AS SELECT * FROM users; -~~~ - -{% include_cached copy-clipboard.html %} -~~~ sql -ALTER TABLE users ADD COLUMN name STRING; -~~~ - -{% include_cached copy-clipboard.html %} -~~~ sql -INSERT INTO users VALUES (1, 'Max Roach'); -~~~ - -{% include_cached copy-clipboard.html %} -~~~ sql -EXECUTE prep1; -~~~ - -~~~ -ERROR: cached plan must not change result type -SQLSTATE: 0A000 -~~~ - -It's therefore recommended to explicitly list result columns instead of using `SELECT *` in prepared statements, when possible. - - -## Join flags when restoring a backup onto new machines - -In our [deployment tutorials](manual-deployment.html), when starting the first node of a cluster, the `--join` flag should be empty, but when starting all subsequent nodes, the `--join` flag should be set to the address of node 1. This approach ensures that all nodes have access to a copy of the first key-value range, which is part of a meta-index identifying where all range replicas are stored, and which nodes require to initialize themselves and start accepting incoming connections. - -Ensuring that all nodes have access to a copy of the first key-value range is more difficult when restoring from a whole-cluster backup onto machines with different IP addresses than the original cluster. In this case, the `--join` flags must form a fully-connected directed graph. The easiest way to do this is to put all of the new nodes' addresses into each node's `--join` flag, which ensures all nodes can join a node with a copy of the first key-value range. - -## `INSERT ON CONFLICT` vs. `UPSERT` - -When inserting/updating all columns of a table, and the table has no secondary indexes, we recommend using an [`UPSERT`](upsert.html) statement instead of the equivalent [`INSERT ON CONFLICT`](insert.html) statement. Whereas `INSERT ON CONFLICT` always performs a read to determine the necessary writes, the `UPSERT` statement writes without reading, making it faster. - -This issue is particularly relevant when using a simple SQL table of two columns to [simulate direct KV access](frequently-asked-questions.html#can-i-use-cockroachdb-as-a-key-value-store). In this case, be sure to use the `UPSERT` statement. - -## Repeated or combined commands in the SQL shell history - -{{site.data.alerts.callout_info}}Resolved as of v2.0.{{site.data.alerts.end}} - -Our [built-in SQL shell](use-the-built-in-sql-client.html) stores previously executed commands in the shell's history. In some cases, these commands are unexpectedly duplicated. - -Also, in some terminals, such as `st` or `xterm` without `tmux`, previously executed commands are combined into a single command in the SQL shell history. - -## Using `\|` to perform a large input in the SQL shell - -In the [built-in SQL shell](use-the-built-in-sql-client.html), using the [`\|`](use-the-built-in-sql-client.html#sql-shell-commands) operator to perform a large number of inputs from a file can cause the server to close the connection. 
This is because `\|` sends the entire file as a single query to the server, which can exceed the upper bound on the size of a packet the server can accept from any client (16MB). - -As a workaround, [execute the file from the command line](use-the-built-in-sql-client.html#execute-sql-statements-from-a-file) with `cat data.sql | cockroach sql` instead of from within the interactive shell. - -## New values generated by `DEFAULT` expressions during `ALTER TABLE ADD COLUMN` - -When executing an [`ALTER TABLE ADD COLUMN`](add-column.html) statement with a [`DEFAULT`](default-value.html) expression, new values generated: - -- use the default [search path](sql-name-resolution.html#search-path) regardless of the search path configured in the current session via `SET SEARCH_PATH`. -- use the UTC time zone regardless of the time zone configured in the current session via [`SET TIME ZONE`](set-vars.html). -- have no default database regardless of the default database configured in the current session via [`SET DATABASE`](set-vars.html), so you must specify the database of any tables they reference. -- use the transaction timestamp for the `statement_timestamp()` function regardless of the time at which the `ALTER` statement was issued. - -## Load-based lease rebalancing in uneven latency deployments - -When nodes are started with the [`--locality`](start-a-node.html#flags) flag, CockroachDB attempts to place the replica lease holder (the replica that client requests are forwarded to) on the node closest to the source of the request. This means as client requests move geographically, so too does the replica lease holder. - -However, you might see increased latency caused by a consistently high rate of lease transfers between datacenters in the following case: - -- Your cluster runs in datacenters which are very different distances away from each other. -- Each node was started with a single tier of `--locality`, e.g., `--locality=datacenter=a`. -- Most client requests get sent to a single datacenter because that's where all your application traffic is. - -To detect if this is happening, open the [Admin UI](explore-the-admin-ui.html), select the **Queues** dashboard, hover over the **Replication Queue** graph, and check the **Leases Transferred / second** data point. If the value is consistently larger than 0, you should consider stopping and restarting each node with additional tiers of locality to improve request latency. - -For example, let's say that latency is 10ms from nodes in datacenter A to nodes in datacenter B but is 100ms from nodes in datacenter A to nodes in datacenter C. To ensure A's and B's relative proximity is factored into lease holder rebalancing, you could restart the nodes in datacenter A and B with a common region, `--locality=region=foo,datacenter=a` and `--locality=region=foo,datacenter=b`, while restarting nodes in datacenter C with a different region, `--locality=region=bar,datacenter=c`. - -## Roundtrip to `STRING` does not respect precedences of `:::` and `-` - -{{site.data.alerts.callout_info}}Resolved as of v1.1. 
See #15617.{{site.data.alerts.end}} - -Queries with constant expressions that evaluate to 2**-63 might get incorrectly rejected, for example: - -~~~ sql -> CREATE TABLE t (i int PRIMARY KEY); - -> INSERT INTO t VALUES (1), (2), (3); - -> SELECT (-9223372036854775808) ::: INT; - -> SELECT i FROM t WHERE (i, i) < (1, -9223372036854775808); -~~~ - -~~~ -pq: ($0, $0) < (1, - 9223372036854775808:::INT): tuples ($0, $0), (1, - 9223372036854775808:::INT) are not comparable at index 2: numeric constant out of int64 range -~~~ - -## Overload resolution for collated strings - -Many string operations are not properly overloaded for [collated strings](collate.html), for example: - -~~~ sql -> SELECT 'string1' || 'string2'; -~~~ - -~~~ -+------------------------+ -| 'string1' || 'string2' | -+------------------------+ -| string1string2 | -+------------------------+ -(1 row) -~~~ - -~~~ sql -> SELECT ('string1' collate en) || ('string2' collate en); -~~~ - -~~~ -pq: unsupported binary operator: || -~~~ - -## Quoting collation locales containing uppercase letters - -{{site.data.alerts.callout_info}}Resolved as of v1.0.1. See #15917.{{site.data.alerts.end}} - -Quoting a [collation](collate.html) locale containing uppercase letters results in an error, for example: - -~~~ sql -> CREATE TABLE a (b STRING COLLATE "DE"); -~~~ - -~~~ -invalid syntax: statement ignored: invalid locale "DE": language: tag is not well-formed at or near ")" -CREATE TABLE a (b STRING COLLATE "DE"); - ^ -~~~ - -As a workaround, make the locale lowercase or remove the quotes, for example: - -~~~ sql -> CREATE TABLE a (b STRING COLLATE "de"); - -> CREATE TABLE b (c STRING COLLATE DE); -~~~ - -## Creating views with array types - -{{site.data.alerts.callout_info}}Resolved as of v1.0.1. See #15913.{{site.data.alerts.end}} - -Because arrays are not supported, attempting to [create a view](create-view.html) with an array in the `SELECT` query crashes the node that receives the request. - -## Dropping a database containing views - -{{site.data.alerts.callout_info}}Resolved as of v1.0.1. See #15983.{{site.data.alerts.end}} - -When a [view](views.html) queries multiple tables or a single table multiple times (e.g., via [`UNION`](select.html#combine-multiple-selects-union-intersect-except)), dropping the -database containing the tables fails silently. - -## Qualifying a column that comes from a view - -{{site.data.alerts.callout_info}}Resolved as of v1.0.1. See #15984.{{site.data.alerts.end}} - -It is not possible to fully qualify a column that comes from a view because the view gets replaced by an anonymous subquery, for example: - -~~~ sql -> CREATE TABLE test (a INT, b INT); - -> CREATE VIEW Caps AS SELECT a, b FROM test; - -> SELECT sum(Caps.a) FROM Caps GROUP BY b; -~~~ - -~~~ -pq: source name "caps" not found in FROM clause -~~~ - -## Write and update limits for a single transaction - -A single transaction can contain at most 100,000 write operations (e.g., changes to individual columns) and at most 64MiB of combined updates. When a transaction exceeds these limits, it gets aborted. `INSERT INTO .... SELECT FROM ...` queries commonly encounter these limits. - -If you need to increase these limits, you can update the [cluster-wide settings](cluster-settings.html) `kv.transaction.max_intents` and `kv.raft.command.max_size`. For `INSERT INTO .. SELECT FROM` queries in particular, another workaround is to manually page through the data you want to insert using separate transactions. 
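A rough sketch of the paging approach (with hypothetical `events` and `events_copy` tables keyed on an integer `id`; the batch boundaries are illustrative and should be sized to stay under the limits above):

~~~ sql
-- Each statement runs as its own implicit transaction, so no single
-- transaction has to carry all of the rows:
> INSERT INTO events_copy SELECT * FROM events WHERE id >= 1 AND id <= 50000;
> INSERT INTO events_copy SELECT * FROM events WHERE id > 50000 AND id <= 100000;
> INSERT INTO events_copy SELECT * FROM events WHERE id > 100000 AND id <= 150000;
~~~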
- -## Max size of a single column family - -When creating or updating a row, if the combined size of all values in a single [column family](column-families.html) exceeds the max range size (64MiB by default) for the table, the operation may fail, or cluster performance may suffer. - -As a workaround, you can either [manually split a table's columns into multiple column families](column-families.html#manual-override), or you can [create a table-specific zone configuration](configure-replication-zones.html#create-a-replication-zone-for-a-table) with an increased max range size. - -## Simultaneous client connections and running queries on a single node - -When a node has both a high number of client connections and running queries, the node may crash due to memory exhaustion. This is due to CockroachDB not accurately limiting the number of clients and queries based on the amount of available RAM on the node. - -To prevent memory exhaustion, monitor each node's memory usage and ensure there is some margin between maximum CockroachDB memory usage and available system RAM. For more details about memory usage in CockroachDB, see [this blog post](https://www.cockroachlabs.com/blog/memory-usage-cockroachdb/). - -## SQL subexpressions and memory usage - -Many SQL subexpressions (e.g., `ORDER BY`, `UNION`/`INTERSECT`/`EXCEPT`, `GROUP BY`, subqueries) accumulate intermediate results in RAM on the node processing the query. If the operator attempts to process more rows than can fit into RAM, the node will either crash or report a memory capacity error. For more details about memory usage in CockroachDB, see [this blog post](https://www.cockroachlabs.com/blog/memory-usage-cockroachdb/). - -## Counting distinct rows in a table - -{{site.data.alerts.callout_info}}Resolved as of v1.1. See #17833.{{site.data.alerts.end}} - -When using `count(DISTINCT a.*)` to count distinct rows in a table based on a subset of the columns, as opposed to `count(*)`, the results are almost always incorrect, for example: - -~~~ sql -> CREATE TABLE t (a INT, b INT); - -> INSERT INTO t VALUES (1, 2), (1, 3), (2, 1); - -> SELECT count(DISTINCT t.*) FROM t; -~~~ - -~~~ -+---------------------+ -| count(DISTINCT t.*) | -+---------------------+ -| 1 | -+---------------------+ -(1 row) -~~~ - -As a workaround, list the columns explicitly, for example: - -~~~ sql -> SELECT count(DISTINCT (t.a, t.b)) FROM t; -~~~ - -~~~ -+----------------------------+ -| count(DISTINCT (t.a, t.b)) | -+----------------------------+ -| 3 | -+----------------------------+ -(1 row) -~~~ - -## Running on Windows as a non-admin user - -{{site.data.alerts.callout_info}}Resolved as of v1.0.1. See #15916.{{site.data.alerts.end}} - -By default, CockroachDB periodically rotates the file it writes logs to, as well as a symlink pointing to the file it's currently using. However, on Windows, non-admin users cannot create symlinks, which prevents CockroachDB from starting because it cannot create logs. - -To resolve this issue, non-admin users must log to `stdout` (instead of files) by passing `--log-dir=` (with the empty value) to the `cockroach start` command, for example: - -~~~ shell -$ cockroach.exe start --log-dir= --insecure -~~~ - -## Query planning for `OR` expressions - -Given a query like `SELECT * FROM foo WHERE a > 1 OR b > 2`, even if there are appropriate indexes to satisfy both `a > 1` and `b > 2`, the query planner performs a full table or index scan because it cannot use both conditions at once. 
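For example (an illustrative sketch, not from the original page), even with a secondary index on each column the `OR` form is planned as a full scan. A manual `UNION` rewrite may let each branch be satisfied by its own index, and returns the same rows as long as they are unique (e.g., the table has a primary key), since `UNION` removes duplicates:

~~~ sql
> CREATE TABLE foo (id INT PRIMARY KEY, a INT, b INT, INDEX (a), INDEX (b));

> SELECT * FROM foo WHERE a > 1 OR b > 2;   -- full table or index scan

> SELECT * FROM foo WHERE a > 1
  UNION
  SELECT * FROM foo WHERE b > 2;            -- each branch can use its own index
~~~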
- -## Privileges for `DELETE` and `UPDATE` - -Every [`DELETE`](delete.html) or [`UPDATE`](update.html) statement constructs a `SELECT` statement, even when no `WHERE` clause is involved. As a result, the user executing `DELETE` or `UPDATE` requires both the `DELETE` and `SELECT` or `UPDATE` and `SELECT` [privileges](privileges.html) on the table. - -## Dropping an index interleaved into another index on the same table - -{{site.data.alerts.callout_info}}Resolved as of [v1.1-alpha.20170831](../releases/v1.1.html#v1-1-alpha-20170831.html). See #17860.{{site.data.alerts.end}} - -In the unlikely case that you [interleave](interleave-in-parent.html) an index into another index on the same table and then [drop](drop-index.html) the interleaved index, future DDL operations on the table will fail. - -For example: - -~~~ sql -> CREATE TABLE t1 (id1 INT PRIMARY KEY, id2 INT, id3 INT); -~~~ - -~~~ sql -> CREATE INDEX c ON t1 (id2) - STORING (id1, id3) - INTERLEAVE IN PARENT t1 (id2); -~~~ - -~~~ sql -> SHOW INDEXES FROM t1; -~~~ - -~~~ -+-------+---------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+-------+---------+--------+-----+--------+-----------+---------+----------+ -| t1 | primary | true | 1 | id1 | ASC | false | false | -| t1 | c | false | 1 | id2 | ASC | false | false | -| t1 | c | false | 2 | id1 | N/A | true | false | -| t1 | c | false | 3 | id3 | N/A | true | false | -+-------+---------+--------+-----+--------+-----------+---------+----------+ -(4 rows) -~~~ - -~~~ sql -> DROP INDEX t1@c; -~~~ - -~~~ sql -> DROP TABLE t1; -~~~ - -~~~ -pq: invalid interleave backreference table=t1 index=3: index-id "3" does not exist -~~~ - -~~~ sql -> TRUNCATE TABLE t1; -~~~ - -~~~ -pq: invalid interleave backreference table=t1 index=3: index-id "3" does not exist -~~~ - -~~~ sql -> ALTER TABLE t1 RENAME COLUMN id3 TO id4; -~~~ - -~~~ -pq: invalid interleave backreference table=t1 index=3: index-id "3" does not exist -~~~ - -## Order of dumped schemas and incorrect schemas of dumped views - -{{site.data.alerts.callout_info}}Resolved as of v1.1. See #17581.{{site.data.alerts.end}} - -When using the [`cockroach dump`](sql-dump.html) command to export the schemas of all tables and views in a database, the schemas are ordered alphabetically by name. This is not always an ordering in which the tables and views can be successfully recreated. Also, the schemas of views are dumped incorrectly as `CREATE TABLE` statements. - -For example, consider a database `test` with 2 tables and 1 view. 
Table `a` has a foreign key reference to table `c`, and view `b` references table `c`: - -~~~ sql -> CREATE DATABASE test; - -> CREATE TABLE test.c (a INT PRIMARY KEY, b STRING); - -> CREATE TABLE test.a (a INT PRIMARY KEY, b INT NOT NULL REFERENCES test.c (a)) - -> CREATE VIEW test.b AS SELECT b FROM test.c; -~~~ - -When you dump the schemas of the tables and views in database `test`, they are ordered alphabetically by name, and the schema for view `b` is incorrectly listed as a `CREATE TABLE` statement: - -~~~ shell -$ cockroach dump --insecure --dump-mode=schema > dump.txt -~~~ - -~~~ shell -$ cat dump.txt -~~~ - -~~~ -CREATE TABLE a ( - a INT NOT NULL, - b INT NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (a ASC), - CONSTRAINT fk_b_ref_c FOREIGN KEY (b) REFERENCES c (a), - FAMILY "primary" (a, b) -); - -CREATE TABLE b ( - b STRING NOT NULL -); - -CREATE TABLE c ( - a INT NOT NULL, - b STRING NULL, - CONSTRAINT "primary" PRIMARY KEY (a ASC), - FAMILY "primary" (a, b) -); -~~~ - -If you tried to import these incorrectly ordered schemas to restore the `test` database or create a new database, the import would fail. - -As a workaround, before using exported schemas to recreate tables and views, you must reorder the schemas so that tables with foreign keys and views are listed after the tables they reference, and you must fix `CREATE` statements for views: - -~~~ -CREATE TABLE c ( - a INT NOT NULL, - b STRING NULL, - CONSTRAINT "primary" PRIMARY KEY (a ASC), - FAMILY "primary" (a, b) -); - -CREATE TABLE a ( - a INT NOT NULL, - b INT NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (a ASC), - CONSTRAINT fk_b_ref_c FOREIGN KEY (b) REFERENCES c (a), - FAMILY "primary" (a, b) -); - -CREATE VIEW b AS SELECT b FROM c; -~~~ - -## Dumping data for a view - -{{site.data.alerts.callout_info}}Resolved as of v1.1. See #17581.{{site.data.alerts.end}} - -When using the [`cockroach dump`](sql-dump.html) command to export the data of a [view](views.html), the dump fails. This is because, unlike standard tables, a view does not contain any physical data; instead, it is a stored `SELECT` query that, when requested, dynamically forms a virtual table. 
- -For example, consider a database `test` with a standard table `t1` and a view `v1` that references `t1`: - -~~~ sql -> CREATE DATABASE test; - -> CREATE TABLE test.t1 (a INT PRIMARY KEY, b STRING); - -> INSERT INTO test.t1 VALUES (1, 'a'), (2, 'b'); - -> CREATE VIEW test.v1 AS SELECT b FROM test.t1; -~~~ - -Trying to dump the data of the view results in an error: - -~~~ shell -$ cockroach dump test v1 --insecure --dump-mode=data -~~~ - -~~~ -Error: pq: column name "rowid" not found -Failed running "dump" -~~~ - -This error occurs when trying to dump all the data in database `test` as well: - -~~~ shell -$ cockroach dump test --insecure --dump-mode=data -~~~ - -~~~ -INSERT INTO t1 (a, b) VALUES - (1, 'a'), - (2, 'b'); -Error: pq: column name "rowid" not found -Failed running "dump" -~~~ - -As a workound, when dumping all the data in a database, explicitly list the tables that should be dumped, excluding any views: - -~~~ shell -$ cockroach dump test t1 --insecure --dump-mode=data -~~~ - -~~~ -INSERT INTO t1 (a, b) VALUES - (1, 'a'), - (2, 'b'); -~~~ diff --git a/src/current/v1.0/learn-cockroachdb-sql.md b/src/current/v1.0/learn-cockroachdb-sql.md deleted file mode 100644 index c49090359a0..00000000000 --- a/src/current/v1.0/learn-cockroachdb-sql.md +++ /dev/null @@ -1,413 +0,0 @@ ---- -title: Learn CockroachDB SQL -summary: Learn some of the most essential CockroachDB SQL statements. -toc: true ---- - -This page walks you through some of the most essential CockroachDB SQL statements. For a complete list and related details, see [SQL Statements](sql-statements.html). - -{{site.data.alerts.callout_info}}CockroachDB aims to provide standard SQL with extensions, but some standard SQL functionality is not yet available. See our SQL Feature Support page for more details.{{site.data.alerts.end}} - - -## Create a Database - -CockroachDB comes with a single default `system` database, which contains CockroachDB metadata and is read-only. To create a new database, use [`CREATE DATABASE`](create-database.html) followed by a database name: - -{% include copy-clipboard.html %} -~~~ sql -> CREATE DATABASE bank; -~~~ - -Database names must follow [these identifier rules](keywords-and-identifiers.html#identifiers). To avoid an error in case the database already exists, you can include `IF NOT EXISTS`: - -{% include copy-clipboard.html %} -~~~ sql -> CREATE DATABASE IF NOT EXISTS bank; -~~~ - -When you no longer need a database, use [`DROP DATABASE`](drop-database.html) followed by the database name to remove the database and all its objects: - -{% include copy-clipboard.html %} -~~~ sql -> DROP DATABASE bank; -~~~ - -## Show Databases - -To see all databases, use the [`SHOW DATABASES`](show-databases.html) statement: - -{% include copy-clipboard.html %} -~~~ sql -> SHOW DATABASES; -~~~ - -~~~ -+--------------------+ -| Database | -+--------------------+ -| bank | -| crdb_internal | -| information_schema | -| pg_catalog | -| system | -+--------------------+ -(5 rows) -~~~ - -## Set the Default Database - -To set the default database, use the [`SET`](set-vars.html#examples) statement: - -{% include copy-clipboard.html %} -~~~ sql -> SET DATABASE = bank; -~~~ - -When working with the default database, you do not need to reference it explicitly in statements. 
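For example (an illustrative aside, using the `accounts` table created later on this page), once `bank` is the default database the following two statements are equivalent:

{% include copy-clipboard.html %}
~~~ sql
> SELECT * FROM accounts;
~~~

{% include copy-clipboard.html %}
~~~ sql
> SELECT * FROM bank.accounts;
~~~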
To see which database is currently the default, use the `SHOW DATABASE` statement (note the singular form): - -{% include copy-clipboard.html %} -~~~ sql -> SHOW DATABASE; -~~~ - -~~~ -+----------+ -| database | -+----------+ -| bank | -+----------+ -(1 row) -~~~ - -## Create a Table - -To create a table, use [`CREATE TABLE`](create-table.html) followed by a table name, the column names, and the [data type](data-types.html) and [constraint](constraints.html), if any, for each column: - -{% include copy-clipboard.html %} -~~~ sql -> CREATE TABLE accounts ( - id INT PRIMARY KEY, - balance DECIMAL -); -~~~ - -Table and column names must follow [these rules](keywords-and-identifiers.html#identifiers). Also, when you do not explicitly define a [primary key](primary-key.html), CockroachDB will automatically add a hidden `rowid` column as the primary key. - -To avoid an error in case the table already exists, you can include `IF NOT EXISTS`: - -{% include copy-clipboard.html %} -~~~ sql -> CREATE TABLE IF NOT EXISTS accounts ( - id INT PRIMARY KEY, - balance DECIMAL -); -~~~ - -To show all of the columns from a table, use [`SHOW COLUMNS FROM`](show-columns.html) followed by the table name: - -{% include copy-clipboard.html %} -~~~ sql -> SHOW COLUMNS FROM accounts; -~~~ - -~~~ -+---------+---------+-------+---------+-----------+ -| Field | Type | Null | Default | Indices | -+---------+---------+-------+---------+-----------+ -| id | INT | false | NULL | {primary} | -| balance | DECIMAL | true | NULL | {} | -+---------+---------+-------+---------+-----------+ -(2 rows) -~~~ - -When you no longer need a table, use [`DROP TABLE`](drop-table.html) followed by the table name to remove the table and all its data: - -{% include copy-clipboard.html %} -~~~ sql -> DROP TABLE accounts; -~~~ - -## Show Tables - -To see all tables in the active database, use the [`SHOW TABLES`](show-tables.html) statement: - -{% include copy-clipboard.html %} -~~~ sql -> SHOW TABLES; -~~~ - -~~~ -+----------+ -| Table | -+----------+ -| accounts | -| users | -+----------+ -(2 rows) -~~~ - -To view tables in a database that's not active, use `SHOW TABLES FROM` followed by the name of the database: - -{% include copy-clipboard.html %} -~~~ sql -> SHOW TABLES FROM animals; -~~~ - -~~~ -+-----------+ -| Table | -+-----------+ -| aardvarks | -| elephants | -| frogs | -| moles | -| pandas | -| turtles | -+-----------+ -(6 rows) -~~~ - -## Insert Rows into a Table - -To insert a row into a table, use [`INSERT INTO`](insert.html) followed by the table name and then the column values listed in the order in which the columns appear in the table: - -{% include copy-clipboard.html %} -~~~ sql -> INSERT INTO accounts VALUES (1, 10000.50); -~~~ - -If you want to pass column values in a different order, list the column names explicitly and provide the column values in the corresponding order: - -{% include copy-clipboard.html %} -~~~ sql -> INSERT INTO accounts (balance, id) VALUES - (25000.00, 2); -~~~ - -To insert multiple rows into a table, use a comma-separated list of parentheses, each containing column values for one row: - -{% include copy-clipboard.html %} -~~~ sql -> INSERT INTO accounts VALUES - (3, 8100.73), - (4, 9400.10); -~~~ - -[Default values](default-value.html) are used when you leave specific columns out of your statement, or when you explicitly request default values. 
For example, both of the following statements would create a row with `balance` filled with its default value, in this case `NULL`: - -{% include copy-clipboard.html %} -~~~ sql -> INSERT INTO accounts (id) VALUES - (5); -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> INSERT INTO accounts (id, balance) VALUES - (6, DEFAULT); -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM accounts WHERE id in (5, 6); -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 5 | NULL | -| 6 | NULL | -+----+---------+ -(2 rows) -~~~ - -## Create an Index -[Indexes](indexes.html) help locate data without having to look through every row of a table. They're automatically created for the [primary key](primary-key.html) of a table and any columns with a [Unique constraint](unique.html). - -To create an index for non-unique columns, use [`CREATE INDEX`](create-index.html) followed by an optional index name and an `ON` clause identifying the table and column(s) to index. For each column, you can choose whether to sort ascending (`ASC`) or descending (`DESC`). - -{% include copy-clipboard.html %} -~~~ sql -> CREATE INDEX balance_idx ON accounts (balance DESC); -~~~ - -You can create indexes during table creation as well; just include the `INDEX` keyword followed by an optional index name and the column(s) to index: - -{% include copy-clipboard.html %} -~~~ sql -> CREATE TABLE accounts ( - id INT PRIMARY KEY, - balance DECIMAL, - INDEX balance_idx (balance) -); -~~~ - -## Show Indexes on a Table - -To show the indexes on a table, use [`SHOW INDEX FROM`](show-index.html) followed by the name of the table: - -{% include copy-clipboard.html %} -~~~ sql -> SHOW INDEX FROM accounts; -~~~ - -~~~ -+----------+-------------+--------+-----+---------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+----------+-------------+--------+-----+---------+-----------+---------+----------+ -| accounts | primary | true | 1 | id | ASC | false | false | -| accounts | balance_idx | false | 1 | balance | DESC | false | false | -| accounts | balance_idx | false | 2 | id | ASC | false | true | -+----------+-------------+--------+-----+---------+-----------+---------+----------+ -(3 rows) -~~~ - -## Query a Table - -To query a table, use [`SELECT`](select.html) followed by a comma-separated list of the columns to be returned and the table from which to retrieve the data: - -{% include copy-clipboard.html %} -~~~ sql -> SELECT balance FROM accounts; -~~~ - -~~~ -+----------+ -| balance | -+----------+ -| 10000.50 | -| 25000.00 | -| 8100.73 | -| 9400.10 | -| NULL | -| NULL | -+----------+ -(6 rows) -~~~ - -To retrieve all columns, use the `*` wildcard: - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM accounts; -~~~ - -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.50 | -| 2 | 25000.00 | -| 3 | 8100.73 | -| 4 | 9400.10 | -| 5 | NULL | -| 6 | NULL | -+----+----------+ -(6 rows) -~~~ - -To filter the results, add a `WHERE` clause identifying the columns and values to filter on: - -{% include copy-clipboard.html %} -~~~ sql -> SELECT id, balance FROM accounts WHERE balance > 9000; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 2 | 25000 | -| 1 | 10000.5 | -| 4 | 9400.1 | -+----+---------+ -(3 rows) -~~~ - -To sort the results, add an `ORDER BY` clause identifying the columns to sort by. For each column, you can choose whether to sort ascending (`ASC`) or descending (`DESC`). 
- -{% include copy-clipboard.html %} -~~~ sql -> SELECT id, balance FROM accounts ORDER BY balance DESC; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 2 | 25000 | -| 1 | 10000.5 | -| 4 | 9400.1 | -| 3 | 8100.73 | -| 5 | NULL | -| 6 | NULL | -+----+---------+ -(6 rows) -~~~ - -## Update Rows in a Table - -To update rows in a table, use [`UPDATE`](update.html) followed by the table name, a `SET` clause identifying the columns to update and their new values, and a `WHERE` clause identifying the rows to update: - -{% include copy-clipboard.html %} -~~~ sql -> UPDATE accounts SET balance = balance - 5.50 WHERE balance < 10000; -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM accounts; -~~~ - -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.50 | -| 2 | 25000.00 | -| 3 | 8095.23 | -| 4 | 9394.60 | -| 5 | NULL | -| 6 | NULL | -+----+----------+ -(6 rows) -~~~ - -If a table has a primary key, you can use that in the `WHERE` clause to reliably update specific rows; otherwise, each row matching the `WHERE` clause is updated. When there's no `WHERE` clause, all rows in the table are updated. - -## Delete Rows in a Table - -To delete rows from a table, use [`DELETE FROM`](delete.html) followed by the table name and a `WHERE` clause identifying the rows to delete: - -{% include copy-clipboard.html %} -~~~ sql -> DELETE FROM accounts WHERE id in (5, 6); -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM accounts; -~~~ - -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.50 | -| 2 | 25000.00 | -| 3 | 8095.23 | -| 4 | 9394.60 | -+----+----------+ -(4 rows) -~~~ - -Just as with the `UPDATE` statement, if a table has a primary key, you can use that in the `WHERE` clause to reliably delete specific rows; otherwise, each row matching the `WHERE` clause is deleted. When there's no `WHERE` clause, all rows in the table are deleted. - -## What's Next? - -- Explore all [SQL Statements](sql-statements.html) -- [Use the built-in SQL client](use-the-built-in-sql-client.html) to execute statements from a shell or directly from the command line -- [Install the client driver](install-client-drivers.html) for your preferred language and [build an app](build-an-app-with-cockroachdb.html) -- [Explore core CockroachDB features](demo-data-replication.html) like automatic replication, rebalancing, and fault tolerance - diff --git a/src/current/v1.0/manual-deployment-insecure.md b/src/current/v1.0/manual-deployment-insecure.md deleted file mode 100644 index 30b1c43cb1e..00000000000 --- a/src/current/v1.0/manual-deployment-insecure.md +++ /dev/null @@ -1,306 +0,0 @@ ---- -title: Manual Deployment (Insecure) -summary: Learn how to manually deploy an insecure, multi-node CockroachDB cluster on multiple machines. -toc: true ---- - - - -This tutorial shows you how to manually deploy an insecure multi-node CockroachDB cluster on multiple machines, using [HAProxy](http://www.haproxy.org/) load balancers to distribute client traffic. - -{{site.data.alerts.callout_danger}}If you plan to use CockroachDB in production, we strongly recommend using a secure cluster instead. Select Secure above for instructions.{{site.data.alerts.end}} - - -## Requirements - -- You must have SSH access to each machine. This is necessary for distributing binaries. 
-- Your network configuration must allow TCP communication on the following ports: - - **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster) and for clients to connect to HAProxy - - **8080** (`tcp:8080`) to expose your Admin UI - -## Recommendations - -- If you plan to use CockroachDB in production, we recommend using a [secure cluster](manual-deployment.html) instead. Using an insecure cluster comes with risks: - - Your cluster is open to any client that can access any node's IP addresses. - - Any user, even `root`, can log in without providing a password. - - Any user, connecting as `root`, can read or write any data in your cluster. - - There is no network encryption or authentication, and thus no confidentiality. - -- For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). - -## Step 1. Start the first node - -1. SSH to your first machine. - -2. Install CockroachDB from our latest binary: - - ~~~ shell - # Get the latest CockroachDB tarball: - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary: - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary: - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new CockroachDB cluster with a single node: - - ~~~ shell - $ cockroach start --insecure \ - --host= - ~~~ - - This commands starts an insecure node and identifies the address at which other nodes can reach it, in this case an internal address since you likely do not want applications outside your network reaching an insecure cluster. Otherwise, it uses all available defaults. For example, the node stores data in the `cockroach-data` directory, listens for internal and client communication on port 26257, and listens for HTTP requests from the Admin UI on port 8080. To set these options manually, see [Start a Node](start-a-node.html). - -## Step 2. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by starting and joining additional nodes. - -1. SSH to another machine. - -2. Install CockroachDB from our latest binary: - - ~~~ shell - # Get the latest CockroachDB tarball: - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary: - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary: - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's address: - - ~~~ shell - $ cockroach start --insecure \ - --host= \ - --join=:26257 - ~~~ - - The only difference when adding a node is that you connect it to the cluster with the `--join` flag, which takes the address and port of the first node. Otherwise, it's fine to accept all defaults; since each node is on a unique machine, using identical ports will not cause conflicts. - -4. Repeat these steps for each node you want to add. - -## Step 3. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. 
- -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. SSH to your first node. - -2. Launch the built-in SQL client and create a database: - - ~~~ shell - $ cockroach sql --insecure --host= - ~~~ - - ~~~ sql - > CREATE DATABASE insecurenodetest; - ~~~ - -3. In another terminal window, SSH to another node. - -4. Launch the built-in SQL client: - - ~~~ shell - $ cockroach sql --insecure --host= - ~~~ - -5. View the cluster's databases, which will include `insecurenodetest`: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 4. Set up HAProxy load balancers - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes. - {{site.data.alerts.callout_success}}With a single load balancer, client connections are resilient to node failure, but the load balancer itself is a point of failure. It's therefore best to make load balancing resilient as well by using multiple load balancing instances, with a mechanism like floating IPs or DNS to select load balancers for clients.{{site.data.alerts.end}} - -[HAProxy](http://www.haproxy.org/) is one of the most popular open-source TCP load balancers, and CockroachDB includes a built-in command for generating a configuration file that is preset to work with your running cluster, so we feature that tool here. - -1. SSH to the machine where you want to run HAProxy. - -2. Install HAProxy: - - ~~~ shell - $ apt-get install haproxy - ~~~ - -3. Install CockroachDB from our latest binary: - - ~~~ shell - # Get the latest CockroachDB tarball. - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - - # Extract the binary. - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - - # Move the binary. - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -4. Run the [`cockroach gen haproxy`](generate-cockroachdb-resources.html) command, specifying the address of any CockroachDB node: - - ~~~ shell - $ cockroach gen haproxy --insecure \ - --host= \ - --port=26257 \ - ~~~ - - By default, the generated configuration file is called `haproxy.cfg` and looks as follows, with the `server` addresses pre-populated correctly: - - ~~~ shell - global - maxconn 4096 - - defaults - mode tcp - timeout connect 10s - timeout client 1m - timeout server 1m - - listen psql - bind :26257 - mode tcp - balance roundrobin - server cockroach1 :26257 - server cockroach2 :26257 - server cockroach3 :26257 - ~~~ - - The file is preset with the minimal [configurations](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html) needed to work with your running cluster: - - Field | Description - ------|------------ - `timout connect`
    `timeout client`
    `timeout server` | Timeout values that should be suitable for most deployments. - `bind` | The port that HAProxy listens on. This is the port clients will connect to and thus needs to be allowed by your network configuration.

    This tutorial assumes HAProxy is running on a separate machine from CockroachDB nodes. If you run HAProxy on the same machine as a node (not recommended), you'll need to change this port, as `26257` is also used for inter-node communication. - `balance` | The balancing algorithm. This is set to `roundrobin` to ensure that connections get rotated amongst nodes (connection 1 on node 1, connection 2 on node 2, etc.). Check the [HAProxy Configuration Manual](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html#4-balance) for details about this and other balancing algorithms. - `server` | For each node in the cluster, this field specifies the interface that the node listens on, i.e., the address passed in the `--host` flag on node startup. - - {{site.data.alerts.callout_info}}For full details on these and other configuration settings, see the HAProxy Configuration Manual.{{site.data.alerts.end}} - -5. Start HAProxy, with the `-f` flag pointing to the `haproxy.cfg` file: - - ~~~ shell - $ haproxy -f haproxy.cfg - ~~~ - -6. Repeat these steps for each additional instance of HAProxy you want to run. - -## Step 5. Test load balancing - -Now that HAProxy is running, it can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients can connect to a HAProxy server, which will then redirect the connection to a CockroachDB node. - -To test this, install CockroachDB locally and use the [built-in SQL client](use-the-built-in-sql-client.html) as follows: - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if it's not there already. - -2. Launch the built-in SQL client, with the `--host` flag set to the address of one of the HAProxy servers: - - ~~~ shell - $ cockroach sql --insecure \ - --host= \ - --port=26257 - ~~~ - -3. View the cluster's databases: - - ~~~ sql - > SHOW DATABASES; - ~~~ - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | insecurenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, HAProxy redirected the query to one of the CockroachDB nodes. - -4. Check which node you were redirected to: - - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - ~~~ - +---------+ - | node_id | - +---------+ - | 3 | - +---------+ - (1 row) - ~~~ - -5. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 6. Configure replication - -In CockroachDB, you use **replication zones** to control the number and location of replicas for specific sets of data. Initially, there is a single, default replication zone for the entire cluster. You can adjust this default zone as well as add zones for individual databases and tables as needed. - -For more information, see [Configure Replication Zones](configure-replication-zones.html). - -## Step 7. Use the cluster - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. [Connect your application](install-client-drivers.html). Be sure to connect your application to the HAProxy server, not to a CockroachDB node. - -## Step 8. Monitor the cluster - -View your cluster's Admin UI by going to `http://:8080`. - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. 
- - Also check the **Replicas** column. If you have nodes with 0 replicas, it's possible you didn't properly set the `--host` flag. This prevents the node from receiving replicas and working as part of the cluster. - -2. Click the **Databases** tab on the left to verify that `insecurenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## See Also - -- [Cloud Deployment](cloud-deployment.html) -- [Orchestration](orchestration.html) -- [Monitoring](monitor-cockroachdb-with-prometheus.html) -- [Start a Local Cluster](start-a-local-cluster.html) -- [Run CockroachDB in a VirtualBox VM](http://uptimedba.github.io/cockroach-vb-single/cockroach-vb-single/home.html) (community-supported) diff --git a/src/current/v1.0/manual-deployment.md b/src/current/v1.0/manual-deployment.md deleted file mode 100644 index ef81823228a..00000000000 --- a/src/current/v1.0/manual-deployment.md +++ /dev/null @@ -1,458 +0,0 @@ ---- -title: Manual Deployment -summary: Learn how to manually deploy a secure, multi-node CockroachDB cluster on multiple machines. -toc: true ---- - - - -This tutorial shows you how to manually deploy a secure multi-node CockroachDB cluster on multiple machines, using [HAProxy](http://www.haproxy.org/) load balancers to distribute client traffic. - -If you are only testing CockroachDB, or you are not concerned with protecting network communication with TLS encryption, you can use an insecure cluster instead. Select **Insecure** above for instructions. - - -## Requirements - -- You must have [CockroachDB installed](install-cockroachdb.html) locally. This is necessary for generating and managing your deployment's certificates. -- You must have SSH access to each machine. This is necessary for distributing binaries and certificates. -- Your network configuration must allow TCP communication on the following ports: - - **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster) and for clients to connect to HAProxy - - **8080** (`tcp:8080`) to expose your Admin UI - -## Recommendations - -For guidance on cluster topology, clock synchronization, and file descriptor limits, see [Recommended Production Settings](recommended-production-settings.html). - -## Step 1. Generate certificates - -Locally, you'll need to [create the following certificates and keys](create-security-certificates.html): - -- A certificate authority (CA) key pair (`ca.crt` and `ca.key`). -- A node key pair for each node, issued to its IP addresses and any common names the machine uses, as well as to the IP addresses and common names for machines running HAProxy. -- A client key pair for the `root` user. - -{{site.data.alerts.callout_success}}Before beginning, it's useful to collect each of your machine's internal and external IP addresses, as well as any server names you want to issue certificates for.{{site.data.alerts.end}} - -1. [Install CockroachDB](install-cockroachdb.html) on your local machine, if you haven't already. - -2. Create two directories: - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir certs - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir my-safe-directory - ~~~ - - `certs`: You'll generate your CA certificate and all node and client certificates and keys in this directory and then upload some of the files to your nodes. - - `my-safe-directory`: You'll generate your CA key in this directory and then reference the key when generating node and client certificates. 
After that, you'll keep the key safe and secret; you will not upload it to your nodes. - -3. Create the CA certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-ca \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -4. Create the certificate and key for the first node, issued to all common names you might use to refer to the node as well as to the HAProxy instances: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -5. Upload certificates to the first node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -6. Delete the local copy of the node certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ rm certs/node.crt certs/node.key - ~~~ - - {{site.data.alerts.callout_info}}This is necessary because the certificates and keys for additional nodes will also be named node.crt and node.key As an alternative to deleting these files, you can run the next cockroach cert create-node commands with the --overwrite flag.{{site.data.alerts.end}} - -7. Create the certificate and key for the second node, issued to all common names you might use to refer to the node as well as to the HAProxy instances: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - \ - \ - \ - \ - localhost \ - 127.0.0.1 \ - \ - \ - \ - \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -8. Upload certificates to the second node: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the certs directory: - $ ssh @ "mkdir certs" - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Upload the CA certificate and node certificate and key: - $ scp certs/ca.crt \ - certs/node.crt \ - certs/node.key \ - @:~/certs - ~~~ - -9. Repeat steps 6 - 8 for each additional node. - -10. Create a client certificate and key for the `root` user: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-client \ - root \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {{site.data.alerts.callout_success}}In later steps, you'll use the root user's certificate to run cockroach client commands from your local machine. If you might also want to run cockroach client commands directly on a node (e.g., for local debugging), you'll need to copy the root user's certificate and key to that node as well.{{site.data.alerts.end}} - -## Step 2. Start the first node - -1. SSH to your first machine. - -2. Install CockroachDB from our latest binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball: - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary: - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary: - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. 
Start a new CockroachDB cluster with a single node: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --background \ - --certs-dir=certs \ - --host= - ~~~ - - This command specifies the location of certificates and the address at which other nodes can reach it. Otherwise, it uses all available defaults. For example, the node stores data in the `cockroach-data` directory, binds internal and client communication to port 26257, and binds Admin UI HTTP requests to port 8080. To set these options manually, see [Start a Node](start-a-node.html). - -## Step 3. Add nodes to the cluster - -At this point, your cluster is live and operational but contains only a single node. Next, scale your cluster by starting and joining additional nodes. - -1. SSH to another machine. - -2. Install CockroachDB from our latest binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball: - $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary: - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary: - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -3. Start a new node that joins the cluster using the first node's address: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --background \ - --certs-dir=certs \ - --host= \ - --join=:26257 - ~~~ - - The only difference when adding a node is that you connect it to the cluster with the `--join` flag, which takes the address and port of the first node. Otherwise, it's fine to accept all defaults; since each node is on a unique machine, using identical ports will not cause conflicts. - -4. Repeat these steps for each node you want to add. - -## Step 4. Test your cluster - -CockroachDB replicates and distributes data for you behind-the-scenes and uses a [Gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, launch the built-in SQL client with the `--host` flag set to the address of node 1 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. Create a `securenodetest` database: - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE DATABASE securenodetest; - ~~~ - -3. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -4. Launch the built-in SQL client with the `--host` flag set to the address of node 2 and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -5. View the cluster's databases, which will include `securenodetest`: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - -6. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 5. 
Set up HAProxy load balancers - -Each CockroachDB node is an equally suitable SQL gateway to your cluster, but to ensure client performance and reliability, it's important to use load balancing: - -- **Performance:** Load balancers spread client traffic across nodes. This prevents any one node from being overwhelmed by requests and improves overall cluster performance (queries per second). - -- **Reliability:** Load balancers decouple client health from the health of a single CockroachDB node. In cases where a node fails, the load balancer redirects client traffic to available nodes. - {{site.data.alerts.callout_success}}With a single load balancer, client connections are resilient to node failure, but the load balancer itself is a point of failure. It's therefore best to make load balancing resilient as well by using multiple load balancing instances, with a mechanism like floating IPs or DNS to select load balancers for clients.{{site.data.alerts.end}} - -[HAProxy](http://www.haproxy.org/) is one of the most popular open-source TCP load balancers, and CockroachDB includes a built-in command for generating a configuration file that is preset to work with your running cluster, so we feature that tool here. - -1. On your local machine, run the [`cockroach gen haproxy`](generate-cockroachdb-resources.html) command with the `--host` flag set to the address of any node and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach gen haproxy \ - --certs-dir=certs \ - --host=
    \ - --port=26257 - ~~~ - - By default, the generated configuration file is called `haproxy.cfg` and looks as follows, with the `server` addresses pre-populated correctly: - - ~~~ shell - global - maxconn 4096 - - defaults - mode tcp - timeout connect 10s - timeout client 1m - timeout server 1m - - listen psql - bind :26257 - mode tcp - balance roundrobin - server cockroach1 :26257 - server cockroach2 :26257 - server cockroach3 :26257 - ~~~ - - The file is preset with the minimal [configurations](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html) needed to work with your running cluster: - - Field | Description - ------|------------ - `timeout connect`
    `timeout client`
    `timeout server` | Timeout values that should be suitable for most deployments. - `bind` | The port that HAProxy listens on. This is the port clients will connect to and thus needs to be allowed by your network configuration.

    This tutorial assumes HAProxy is running on a separate machine from CockroachDB nodes. If you run HAProxy on the same machine as a node (not recommended), you'll need to change this port, as `26257` is also used for inter-node communication. - `balance` | The balancing algorithm. This is set to `roundrobin` to ensure that connections get rotated amongst nodes (connection 1 on node 1, connection 2 on node 2, etc.). Check the [HAProxy Configuration Manual](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html#4-balance) for details about this and other balancing algorithms. - `server` | For each node in the cluster, this field specifies the interface that the node listens on, i.e., the address passed in the `--host` flag on node startup. - - {{site.data.alerts.callout_info}}For full details on these and other configuration settings, see the HAProxy Configuration Manual.{{site.data.alerts.end}} - -2. Upload the `haproxy.cfg` file to the machine where you want to run HAProxy: - - {% include copy-clipboard.html %} - ~~~ shell - $ scp haproxy.cfg @:~/ - ~~~ - -3. SSH to the machine where you want to run HAProxy. - -4. Install HAProxy: - - {% include copy-clipboard.html %} - ~~~ shell - $ apt-get install haproxy - ~~~ - -5. Start HAProxy, with the `-f` flag pointing to the `haproxy.cfg` file: - - {% include copy-clipboard.html %} - ~~~ shell - $ haproxy -f haproxy.cfg - ~~~ - -6. Repeat these steps for each additional instance of HAProxy you want to run. - -## Step 6. Test load balancing - -Now that HAProxy is running, it can serve as the client gateway to the cluster. Instead of connecting directly to a CockroachDB node, clients can connect to a HAProxy server, which will then redirect the connection to a CockroachDB node. - -To test this, use the [built-in SQL client](use-the-built-in-sql-client.html) locally as follows: - -1. On your local machine, launch the built-in SQL client with the `--host` flag set to the address of any HAProxy server and security flags pointing to the CA cert and the client cert and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach sql \ - --certs-dir=certs \ - --host= - ~~~ - -2. View the cluster's databases: - - {% include copy-clipboard.html %} - ~~~ sql - > SHOW DATABASES; - ~~~ - - ~~~ - +--------------------+ - | Database | - +--------------------+ - | crdb_internal | - | information_schema | - | securenodetest | - | pg_catalog | - | system | - +--------------------+ - (5 rows) - ~~~ - - As you can see, HAProxy redirected the query to one of the CockroachDB nodes. - -3. Check which node you were redirected to: - - {% include copy-clipboard.html %} - ~~~ sql - > SELECT node_id FROM crdb_internal.node_build_info LIMIT 1; - ~~~ - - ~~~ - +---------+ - | node_id | - +---------+ - | 3 | - +---------+ - (1 row) - ~~~ - -4. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 7. Configure replication - -In CockroachDB, you use **replication zones** to control the number and location of replicas for specific sets of data. Initially, there is a single, default replication zone for the entire cluster. You can adjust this default zone as well as add zones for individual databases and tables as needed. - -For more information, see [Configure Replication Zones](configure-replication-zones.html). - -## Step 8. Use the cluster - -Now that your deployment is working, you can: - -1. [Implement your data model](sql-statements.html). -2. [Create users](create-and-manage-users.html) and [grant them privileges](grant.html). -3. 
[Connect your application](install-client-drivers.html). Be sure to connect your application to the HAProxy server, not to a CockroachDB node. - -## Step 9. Monitor the cluster - -View your cluster's Admin UI by going to `https://:8080`. - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - - Also check the **Replicas** column. If you have nodes with 0 replicas, it's possible you didn't properly set the `--host` flag. This prevents the node from receiving replicas and working as part of the cluster. - -2. Click the **Databases** tab on the left to verify that `insecurenodetest` is listed. - -{% include {{ page.version.version }}/misc/prometheus-callout.html %} - -## See Also - -- [Cloud Deployment](cloud-deployment.html) -- [Orchestration](orchestration.html) -- [Monitoring](monitor-cockroachdb-with-prometheus.html) -- [Start a Local Cluster](start-a-local-cluster.html) -- [Run CockroachDB in a VirtualBox VM](http://uptimedba.github.io/cockroach-vb-single/cockroach-vb-single/home.html) (community-supported) diff --git a/src/current/v1.0/monitor-cockroachdb-with-prometheus.md b/src/current/v1.0/monitor-cockroachdb-with-prometheus.md deleted file mode 100644 index 2f76fe7f30b..00000000000 --- a/src/current/v1.0/monitor-cockroachdb-with-prometheus.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -title: Monitor CockroachDB with Prometheus -summary: How to pull CockroachDB's time series metrics into Prometheus. -toc: true ---- - -CockroachDB generates detailed time series metrics for each node in a cluster. This page shows you how to pull these metrics into [Prometheus](https://prometheus.io/), an open source tool for storing, aggregating, and querying time series data. It also shows you how to connect [Grafana](https://grafana.com/) and [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) to Prometheus for flexible data visualizations and notifications. - -{{site.data.alerts.callout_success}}All files used in this tutorial can be found in the monitoring directory of the CockroachDB repository.{{site.data.alerts.end}} - - -## Before You Begin - -Make sure you have already started a CockroachDB cluster, either [locally](start-a-local-cluster.html) or in a [production environment](cloud-deployment.html). - -## Step 1. Install Prometheus - -1. Download the [2.x Prometheus tarball](https://prometheus.io/download/) for your OS. - -2. Extract the binary and add it to your `PATH`. This makes it easy to start Prometheus from any shell. - -3. Make sure Prometheus installed successfully: - - ~~~ shell - $ prometheus --version - ~~~ - - ~~~ - prometheus, version 2.2.1 (branch: HEAD, revision: bc6058c81272a8d938c05e75607371284236aadc) - build user: root@149e5b3f0829 - build date: 20180314-14:21:40 - go version: go1.10 - ~~~ - -## Step 2. Configure Prometheus - -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: - - {% include copy-clipboard.html %} - ~~~ shell - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml \ - -O prometheus.yml - ~~~ - - When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: - - `scrape_interval: 10s` defines the scrape interval. 
- - `metrics_path: '/_status/vars'` defines the Prometheus-specific CockroachDB endpoint for scraping time series metrics. - - `scheme: 'http'` specifies that the cluster being scraped is insecure. - - `targets: ['localhost:8080']` specifies the hostname and `http-port` of the Cockroach node to collect time series metrics on. - -2. Edit the configuration file to match your deployment scenario: - - Scenario | Config Change - ---------|-------------- - Multi-node local cluster | Expand the `targets` field to include `'localhost:'` for each additional node. - Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. - Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. - -4. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir rules - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ cd rules - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ wget -P rules https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ wget -P rules https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml - ~~~ - -## Step 3. Start Prometheus - -1. Start the Prometheus server, with the `--config.file` flag pointing to the configuration file: - - ~~~ shell - $ prometheus --config.file=prometheus.yml - ~~~ - - ~~~ - INFO[0000] Starting prometheus (version=1.4.1, branch=master, revision=2a89e8733f240d3cd57a6520b52c36ac4744ce12) source=main.go:77 - INFO[0000] Build context (go=go1.7.3, user=root@e685d23d8809, date=20161128-10:02:41) source=main.go:78 - INFO[0000] Loading configuration file prometheus.yml source=main.go:250 - INFO[0000] Loading series map and head chunks... source=storage.go:354 - INFO[0000] 0 series loaded. source=storage.go:359 - INFO[0000] Listening on :9090 source=web.go:248 - INFO[0000] Starting target manager... source=targetmanager.go:63 - ~~~ - -2. Point your browser to `http://:9090`, where you can use the Prometheus UI to query, aggregate, and graph CockroachDB time series metrics. - - Prometheus auto-completes CockroachDB time series metrics for you, but if you want to see a full listing, with descriptions, point your browser to `http://:8080/_status/vars`. - - For more details on using the Prometheus UI, see their [official documentation](https://prometheus.io/docs/introduction/getting_started/). - -## Step 4. Send notifications with Alertmanager - -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). - -1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. - -2. Extract the binary and add it to your `PATH`. This makes it easy to start Alertmanager from any shell. - -3. 
Make sure Alertmanager installed successfully: - - ~~~ shell - $ alertmanager --version - ~~~ - - ~~~ - alertmanager, version 0.15.0-rc.1 (branch: HEAD, revision: acb111e812530bec1ac6d908bc14725793e07cf3) - build user: root@f278953f13ef - build date: 20180323-13:07:06 - go version: go1.10 - ~~~ - -4. [Edit the Alertmanager configuration file](https://prometheus.io/docs/alerting/configuration/) that came with the binary, `simple.yml`, to specify the desired receivers for notifications. - -5. Start the Alertmanager server, with the `--config.file` flag pointing to the configuration file: - - ~~~ shell - $ alertmanager --config.file=simple.yml - ~~~ - -6. Point your browser to `http://:9093`, where you can use the Alertmanager UI to define rules for [silencing alerts](https://prometheus.io/docs/alerting/alertmanager/#silences). - -## Step 5. Visualize metrics in Grafana - -Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a much more powerful visualization tool that integrates with Prometheus easily. - -1. [Install and start Grafana for your OS](https://grafana.com/grafana/download). - -2. Point your browser to `http://:3000` and log into the Grafana UI with the default username/password, `admin/admin`, or create your own account. - -3. [Add Prometheus as a datasource](http://docs.grafana.org/datasources/prometheus/), and configure the datasource as follows: - - Field | Definition - ------|----------- - Name | Prometheus - Default | True - Type | Prometheus - Url | `http://:9090` - Access | Direct - -4. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards) for CockroachDB: - - ~~~ shell - # runtime dashboard: node status, including uptime, memory, and cpu. - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/runtime.json - - # storage dashboard: storage availability. - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/storage.json - - # sql dashboard: sql queries/transactions. - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/sql.json - - # replicas dashboard: replica information and operations. - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/replication.json - ~~~ - -5. [Add the dashboards to Grafana](http://docs.grafana.org/reference/export_import/#importing-a-dashboard). diff --git a/src/current/v1.0/multi-active-availability.md b/src/current/v1.0/multi-active-availability.md deleted file mode 100644 index 0a70e5256eb..00000000000 --- a/src/current/v1.0/multi-active-availability.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Multi-Active Availability -summary: Learn about CockroachDB's high availability model, known as Multi-Active Availability. -toc: true ---- - -CockroachDB's availability model is described as "Multi-Active Availability." In essence, multi-active availability provides benefits similar to traditional notions of high availability, but also lets you read and write from every node in your cluster without generating any conflicts. - - -## What is High Availability? - -High availability lets an application continue running even if a system hosting one of its services fails. This is achieved by scaling the application's services horizontally, i.e., replicating the service across many machines or systems. 
If any one of them fails, the others can simply step in and perform the same service. - -Before diving into the details of CockroachDB's multi-active availability, we'll review the two most common high availability designs: [Active-Passive](#active-passive) and [Active-Active](#active-active) systems. - -### Active-Passive - -In active-passive systems, all traffic is routed to a single, "active" replica. Changes to the replica's state are then copied to a backup "passive" replica, in an attempt to always mirror the active replica as closely as possible. - -However, this design has downsides: - -- If you use asynchronous replication, you cannot guarantee that any data is ever successfully replicated to passive followers––meaning you can easily lose data. Depending on your industry, this could have pretty dire consequences. -- If you use synchronous replication and any passive replicas fail, you have to either sacrifice availability for the entire application or risk inconsistencies. - -### Active-Active - -In active-active systems, multiple replicas run identical services, and traffic is routed to all of them. If any replica fails, the others simply handle the traffic that would've been routed to it. - -For databases, though, active-active replication is incredibly difficult to instrument for most workloads. For example, if you let multiple replicas handle writes for the same keys, how do you keep them consistent? - -#### Example: Conflicts with Active-Active Replication - -For this example, we have 2 replicas (**A**, **B**) in an active-active high availability cluster. - -1. **A** receives a write for key `xyz` of `'123'`, and then immediately fails. -2. **B** receives a read of key `xyz`, and returns a `NULL` because it cannot find the key. -3. **B** then receives a write for key `xyz` of `'456'`. -4. **A** is restarted and attempts to rejoin **B**––but what do you do about key `xyz`? There's an inconsistency in the system without a clear way to resolve it. - -{{site.data.alerts.callout_info}}In this example, the cluster remained active the entire time. But in terms of the CAP theorem, this is an AP system; it favored being available instead of consistent when partitions occur.{{site.data.alerts.end}} - -## What is Multi-Active Availability? - -Multi-active availability is CockroachDB's version of high availability (keeping your application online in the face of partial failures), which we've designed to avoid the downsides of both active-passive and traditional active-active systems. - -Like active-active designs, all replicas can handle traffic, including both reads and writes. However, CockroachDB improves upon that design by also ensuring that data remains consistent across them, which we achieve by using "consensus replication." In this design, replication requests are sent to at least 3 replicas, and are only considered committed when a majority of replicas acknowledge that they've received it. This means that you can still have failures without compromising availability. - -To prevent conflicts and guarantee your data's consistency, clusters that lose a majority of replicas stop responding because they've lost the ability to reach a consensus on the state of your data. When a majority of replicas are restarted, your database resumes operation. - -### Consistency Example - -For this example, we have 3 CockroachDB nodes (**A**, **B**, **C**) in a multi-active availability cluster. - -1. **A** receives a write on `xyz` of `'123'`. 
It communicates this write to nodes **B** and **C**, who confirm that they've received the write, as well. Once **A** receives the first confirmation, the change is committed. -2. **A** fails. -3. **B** receives a read of key `xyz`, and returns the result `'123'`. -4. **C** then receives an update for key `xyz` to the values `'456'`. It communicates this write to node **B**, who confirms that its received the write, as well. After receiving the confirmation, the change is committed. -5. **A** is restarted and rejoins the cluster. It receives an update that the key `xyz` had its value changed to `'456'`. - -{{site.data.alerts.callout_info}}In this example, if nodes B or C failed at any time, the cluster would have stopped responding. In terms of the CAP theorem, this is a CP system; it favored being consistent instead of available when partitions occur.{{site.data.alerts.end}} - -## What's next? - -To get a greater understanding of how CockroachDB is a survivable system that enforces strong consistency, check out our [architecture documentation](architecture/overview.html). - -To see Multi-Active Availability in action, see this [availability demo](demo-fault-tolerance-and-recovery.html). diff --git a/src/current/v1.0/not-null.md b/src/current/v1.0/not-null.md deleted file mode 100644 index 9d1c3ce8130..00000000000 --- a/src/current/v1.0/not-null.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Not Null Constraint -summary: The NOT NULL constraint specifies the column may not contain NULL values. -toc: true ---- - -The Not Null [constraint](constraints.html) specifies a column may not contain *NULL* values. - - -## Details - -- `INSERT` or `UPDATE` statements containing *NULL* values are rejected. This includes `INSERT` statements that do not include values for any columns that do not have a [Default Value constraint](default-value.html). - - For example, if the table `foo` has columns `a` and `b` (and `b` *does not* have a Default Value), when you run the following command: - - ~~~ sql - > INSERT INTO foo (a) VALUES (1); - ~~~ - - CockroachDB tries to write a *NULL* value into column `b`. If that column has the Not Null constraint, the `INSERT` statement is rejected. - -- You can only define the Not Null constraint when [creating a table](#syntax); you cannot add it to an existing table. However, you can [migrate data](constraints.html#table-migrations-to-add-or-change-immutable-constraints) from your current table to a new table with the constraint you want to use. - {{site.data.alerts.callout_info}}In the future we plan to support adding the Not Null constraint to existing tables.{{site.data.alerts.end}} - -- For more information about *NULL*, see [Null Handling](null-handling.html). - -## Syntax - -You can only apply the Not Null constraint to individual columns. - -{% include {{ page.version.version }}/sql/diagrams/not_null_column_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_name` | The name of the constrained column. | -| `column_type` | The constrained column's [data type](data-types.html). | -| `column_constraints` | Any other column-level [constraints](constraints.html) you want to apply to this column. | -| `column_def` | Definitions for any other columns in the table. | -| `table_constraints` | Any table-level [constraints](constraints.html) you want to apply. 
| - -## Usage Example - -~~~ sql -> CREATE TABLE IF NOT EXISTS customers ( - customer_id INT PRIMARY KEY, - cust_name STRING(30) NULL, - cust_email STRING(100) NOT NULL - ); - -> INSERT INTO customers (customer_id, cust_name, cust_email) VALUES (1, 'Smith', NULL); -~~~ -~~~ -pq: null value in column "cust_email" violates not-null constraint -~~~ -~~~ sql -> INSERT INTO customers (customer_id, cust_name) VALUES (1, 'Smith'); -~~~ -~~~ -pq: null value in column "cust_email" violates not-null constraint -~~~ - -## See Also - -- [Constraints](constraints.html) -- [`DROP CONSTRAINT`](drop-constraint.html) -- [Check constraint](check.html) -- [Default Value constraint](default-value.html) -- [Foreign Key constraint](foreign-key.html) -- [Primary Key constraint](primary-key.html) -- [Unique constraint](unique.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) diff --git a/src/current/v1.0/null-handling.md b/src/current/v1.0/null-handling.md deleted file mode 100644 index 527c96a667b..00000000000 --- a/src/current/v1.0/null-handling.md +++ /dev/null @@ -1,355 +0,0 @@ ---- -title: NULL Handling -summary: Learn how NULL values are handled in CockroachDB SQL. -toc: true ---- - -This page summarizes how `NULL` values are handled in CockroachDB SQL. Each topic is demonstrated via the [built-in SQL client](use-the-built-in-sql-client.html), using the table data in the first section, [NULLs and Logic](#nulls-and-logic). - -{{site.data.alerts.callout_info}}When using the built-in client, NULL values are displayed using the word NULL. This distinguishes them from a character field that contains an empty string ("").{{site.data.alerts.end}} - - -## NULLs and Logic - -Any comparison between a value and `NULL` results in `NULL`. This behavior is consistent with PostgreSQL as well as all other major RDBMSs.
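-
-As a quick demonstration (a minimal sketch that is not part of the original page; it assumes a locally running insecure node and that the `cockroach` binary is on your `PATH`), you can run a comparison directly from the shell:
-
-~~~ shell
-# Both comparisons return NULL rather than true or false:
-$ cockroach sql --insecure -e "SELECT 1 = NULL, NULL = NULL;"
-~~~
-
-The remaining examples use the table created below.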
- -~~~ sql -> CREATE TABLE t1( - a INT, - b INT, - c INT -); - -> INSERT INTO t1 VALUES(1, 0, 0); -> INSERT INTO t1 VALUES(2, 0, 1); -> INSERT INTO t1 VALUES(3, 1, 0); -> INSERT INTO t1 VALUES(4, 1, 1); -> INSERT INTO t1 VALUES(5, NULL, 0); -> INSERT INTO t1 VALUES(6, NULL, 1); -> INSERT INTO t1 VALUES(7, NULL, NULL); - -> SELECT * FROM t1; -~~~ -~~~ -+---+------+------+ -| a | b | c | -+---+------+------+ -| 1 | 0 | 0 | -| 2 | 0 | 1 | -| 3 | 1 | 0 | -| 4 | 1 | 1 | -| 5 | NULL | 0 | -| 6 | NULL | 1 | -| 7 | NULL | NULL | -+---+------+------+ -~~~ -~~~ sql -> SELECT * FROM t1 WHERE b < 10; -~~~ -~~~ -+---+---+---+ -| a | b | c | -+---+---+---+ -| 1 | 0 | 0 | -| 2 | 0 | 1 | -| 3 | 1 | 0 | -| 4 | 1 | 1 | -+---+---+---+ -~~~ -~~~ sql -> SELECT * FROM t1 WHERE NOT b > 10; -~~~ -~~~ -+---+---+---+ -| a | b | c | -+---+---+---+ -| 1 | 0 | 0 | -| 2 | 0 | 1 | -| 3 | 1 | 0 | -| 4 | 1 | 1 | -+---+---+---+ -~~~ -~~~ sql -> SELECT * FROM t1 WHERE b < 10 OR c = 1; -~~~ -~~~ -+---+------+---+ -| a | b | c | -+---+------+---+ -| 1 | 0 | 0 | -| 2 | 0 | 1 | -| 3 | 1 | 0 | -| 4 | 1 | 1 | -| 6 | NULL | 1 | -+---+------+---+ -~~~ -~~~ sql -> SELECT * FROM t1 WHERE b < 10 AND c = 1; -~~~ -~~~ -+---+---+---+ -| a | b | c | -+---+---+---+ -| 2 | 0 | 1 | -| 4 | 1 | 1 | -+---+---+---+ -~~~ -~~~ sql -> SELECT * FROM t1 WHERE NOT (b < 10 AND c = 1); -~~~ -~~~ -+---+------+---+ -| a | b | c | -+---+------+---+ -| 1 | 0 | 0 | -| 3 | 1 | 0 | -| 5 | NULL | 0 | -+---+------+---+ -~~~ -~~~ sql -> SELECT * FROM t1 WHERE NOT (c = 1 AND b < 10); -~~~ -~~~ -+---+------+---+ -| a | b | c | -+---+------+---+ -| 1 | 0 | 0 | -| 3 | 1 | 0 | -| 5 | NULL | 0 | -+---+------+---+ -~~~ - -Use the `IS NULL` or `IS NOT NULL` clauses when checking for `NULL` values. - -~~~ sql -> SELECT * FROM t1 WHERE b IS NULL AND c IS NOT NULL; -~~~ -~~~ -+---+------+---+ -| a | b | c | -+---+------+---+ -| 5 | NULL | 0 | -| 6 | NULL | 1 | -+---+------+---+ -~~~ - -## NULLs and Arithmetic - -Arithmetic operations involving a `NULL` value will yield a `NULL` result. - -~~~ sql -> SELECT a, b, c, b*0, b*c, b+c FROM t1; -~~~ -~~~ -+---+------+------+-------+-------+-------+ -| a | b | c | b * 0 | b * c | b + c | -+---+------+------+-------+-------+-------+ -| 1 | 0 | 0 | 0 | 0 | 0 | -| 2 | 0 | 1 | 0 | 0 | 1 | -| 3 | 1 | 0 | 0 | 0 | 1 | -| 4 | 1 | 1 | 0 | 1 | 2 | -| 5 | NULL | 0 | NULL | NULL | NULL | -| 6 | NULL | 1 | NULL | NULL | NULL | -| 7 | NULL | NULL | NULL | NULL | NULL | -+---+------+------+-------+-------+-------+ -~~~ - -## NULLs and Aggregate Functions - -Aggregate [functions](functions-and-operators.html) are those that operate on a set of rows and return a single value. The example data has been repeated here to make it easier to understand the results. - -~~~ sql -> SELECT * FROM t1; -~~~ -~~~ -+---+------+------+ -| a | b | c | -+---+------+------+ -| 1 | 0 | 0 | -| 2 | 0 | 1 | -| 3 | 1 | 0 | -| 4 | 1 | 1 | -| 5 | NULL | 0 | -| 6 | NULL | 1 | -| 7 | NULL | NULL | -+---+------+------+ -~~~ -~~~ sql -> SELECT COUNT(*), COUNT(b), SUM(b), AVG(b), MIN(b), MAX(b) FROM t1; -~~~ -~~~ -+----------+----------+--------+--------------------+--------+--------+ -| COUNT(*) | COUNT(b) | SUM(b) | AVG(b) | MIN(b) | MAX(b) | -+----------+----------+--------+--------------------+--------+--------+ -| 7 | 4 | 2 | 0.5000000000000000 | 0 | 1 | -+----------+----------+--------+--------------------+--------+--------+ -~~~ - -Note the following: - -- `NULL` values are not included in the `COUNT()` of a column. `COUNT(*)` returns 7 while `COUNT(b)` returns 4. 
- -- `NULL` values are not considered as high or low values in `MIN()` or `MAX()`. - -- `AVG(b)` returns `SUM(b)/COUNT(b)`, which is different than `AVG(*)` as `NULL` values are not considered in the `COUNT(b)` of rows. See [NULLs as Other Values](#nulls-as-other-values) for more details. - - -## NULL as a Distinct Value - -`NULL` values are considered distinct from other values and are included in the list of distinct values from a column. - -~~~ sql -> SELECT DISTINCT b FROM t1; -~~~ -~~~ -+------+ -| b | -+------+ -| 0 | -| 1 | -| NULL | -+------+ -~~~ - -However, counting the number of distinct values excludes `NULL`s, which is consistent with the `COUNT()` function. - -~~~ sql -> SELECT COUNT(DISTINCT b) FROM t1; -~~~ -~~~ -+-------------------+ -| count(DISTINCT b) | -+-------------------+ -| 2 | -+-------------------+ -~~~ - -## NULLs as Other Values - -In some cases, you may want to include `NULL` values in arithmetic or aggregate function calculations. To do so, use the `IFNULL()` function to substitute a value for `NULL` during calculations. - -For example, let's say you want to calculate the average value of column `b` as being the `SUM()` of all numbers in `b` divided by the total number of rows, regardless of whether `b`'s value is `NULL`. In this case, you would use `AVG(IFNULL(b, 0))`, where `IFNULL(b, 0)` substitutes a value of zero (0) for `NULL`s during the calculation. - -~~~ sql -> SELECT COUNT(*), COUNT(b), SUM(b), AVG(b), AVG(IFNULL(b, 0)), MIN(b), MAX(b) FROM t1; -~~~ -~~~ -+----------+----------+--------+--------------------+--------------------+--------+--------+ -| COUNT(*) | COUNT(b) | SUM(b) | AVG(b) | AVG(IFNULL(b, 0)) | MIN(b) | MAX(b) | -+----------+----------+--------+--------------------+--------------------+--------+--------+ -| 7 | 4 | 2 | 0.5000000000000000 | 0.2857142857142857 | 0 | 1 | -+----------+----------+--------+--------------------+--------------------+--------+--------+ -~~~ - -## NULLs and Set Operations - -`NULL` values are considered as part of a `UNION` set operation. - -~~~ sql -> SELECT b FROM t1 UNION SELECT b FROM t1; -~~~ -~~~ -+------+ -| b | -+------+ -| 0 | -| 1 | -| NULL | -+------+ -~~~ - - -## NULLs and Sorting - -When sorting a column containing `NULL` values, CockroachDB orders `NULL`s lower than the first non-`NULL` value. This differs from PostgreSQL, which orders `NULL`s higher than the last non-`NULL` value. - -Note that the `NULLS FIRST` and `NULLS LAST` options of the `ORDER BY` clause are not implemented in CockroachDB, so you cannot change where `NULL` values appear in the sort order. - -~~~ sql -> SELECT * FROM t1 ORDER BY b; -~~~ -~~~ -+---+------+------+ -| a | b | c | -+---+------+------+ -| 6 | NULL | 1 | -| 5 | NULL | 0 | -| 7 | NULL | NULL | -| 1 | 0 | 0 | -| 2 | 0 | 1 | -| 4 | 1 | 1 | -| 3 | 1 | 0 | -+---+------+------+ -~~~ -~~~ sql -> SELECT * FROM t1 ORDER BY b DESC; -~~~ -~~~ -+---+------+------+ -| a | b | c | -+---+------+------+ -| 4 | 1 | 1 | -| 3 | 1 | 0 | -| 2 | 0 | 1 | -| 1 | 0 | 0 | -| 7 | NULL | NULL | -| 6 | NULL | 1 | -| 5 | NULL | 0 | -+---+------+------+ -~~~ - -## NULLs and Unique Constraints - -`NULL` values are not considered unique. Therefore, if a table has a Unique constraint on one or more columns that are optional (nullable), it is possible to insert multiple rows with `NULL` values in those columns, as shown in the example below. 
- -~~~ sql -> CREATE TABLE t2(a INT, b INT UNIQUE); - -> INSERT INTO t2 VALUES(1, 1); -> INSERT INTO t2 VALUES(2, NULL); -> INSERT INTO t2 VALUES(3, NULL); - -> SELECT * FROM t2; -~~~ -~~~ -+---+------+ -| a | b | -+---+------+ -| 1 | 1 | -| 2 | NULL | -| 3 | NULL | -+---+------+ -~~~ - -## NULLs and CHECK Constraints - -A [Check constraint](check.html) expression that evaluates to `NULL` is considered to pass, allowing for concise expressions like `discount < price` without worrying about adding `OR discount IS NULL` clauses. When non-null validation is desired, the usual Not Null constraint can be used along side a Check constraint. - -~~~ sql -> CREATE TABLE products (id STRING PRIMARY KEY, price INT NOT NULL CHECK (price > 0), discount INT, CHECK (discount <= price)); - -> INSERT INTO products (id, price) VALUES ('ncc-1701-d', 100); -> INSERT INTO products (id, price, discount) VALUES ('ncc-1701-a', 100, 50); - -> SELECT * FROM products; -~~~ -~~~ -+----------+-------+----------+ -| id | price | discount | -+----------+-------+----------+ -| ncc1701a | 100 | 50 | -| ncc1701d | 100 | NULL | -+----------+-------+----------+ -~~~ -~~~ sql -> INSERT INTO products (id, price) VALUES ('ncc-1701-b', -5); -~~~ -~~~ -failed to satisfy CHECK constraint (price > 0) -~~~ -~~~ sql -> INSERT INTO products (id, price, discount) VALUES ('ncc-1701-b', 100, 150); -~~~ -~~~ -failed to satisfy CHECK constraint (discount <= price) -~~~ diff --git a/src/current/v1.0/open-source.md b/src/current/v1.0/open-source.md deleted file mode 100644 index 8750363997e..00000000000 --- a/src/current/v1.0/open-source.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Open Source -summary: CockroachDB is completely open source. -toc: false ---- - -Run on your laptop, development cluster, and public or private cloud without complex licensing, mock implementations, or inscrutable closed-source error output. Be a part of our vibrant community of developers and users! And if you really love databases, you can contribute to the design and implementation as it evolves. - -- Keep your options open and avoid vendor lock-in -- Easy experimentation and enhancement -- Bigger and more active community for support and troubleshooting -- Debug problems through your entire stack - -CockroachDB is open source diff --git a/src/current/v1.0/operational-faqs.md b/src/current/v1.0/operational-faqs.md deleted file mode 100644 index 95ea56028a6..00000000000 --- a/src/current/v1.0/operational-faqs.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Operational FAQs -summary: Get answers to frequently asked questions about operating CockroachDB. -toc: true ---- - - -## Why is my process hanging when I try to start it in the background? - -The first question that needs to be asked is whether or not you have previously -run a multi-node cluster using the same data directory. If you haven't, then you -should check out our [Cluster Setup Troubleshooting -docs](cluster-setup-troubleshooting.html). If you have previously started and -stopped a multi-node cluster and are now trying to bring it back up, you're in -the right place. - -In order to keep your data consistent, CockroachDB only works when at least a -majority of its nodes are running. This means that if only one node of a three -node cluster is running, that one node will not be able to do anything. The -`--background` flag of [`cockroach start`](start-a-node.html) causes the start -command to wait until the node has fully initialized and is able to start -serving queries. 
- -Together, these two facts mean that the `--background` flag will cause -`cockroach start` to hang until a majority of nodes are running. In order to -restart your cluster, you should either use multiple terminals so that you can -start multiple nodes at once or start each node in the background using your -shell's functionality (e.g., `cockroach start &`) instead of the `--background` -flag. - -## Why is memory usage increasing despite lack of traffic? - -If you start a CockroachDB node on your computer and let it run for hours or days, you might notice that its memory usage steadily grows for a while before plateauing at around 25% of your computer's total memory. This is expected behavior -- like most databases, CockroachDB caches the most recently accessed data in memory so that it can provide faster reads, and [its periodic writes of timeseries data](#why-is-disk-usage-increasing-despite-lack-of-writes) cause that cache size to increase until it hits its configured limit. The cache size limit defaults to 25% of the machine's memory, but can be controlled by setting the `--cache` flag when running [`cockroach start`](start-a-node.html). - -## Why is disk usage increasing despite lack of writes? - -The timeseries data used to power the graphs in the admin UI is stored within the cluster and accumulates for 30 days before it starts getting truncated. As a result, for the first 30 days or so of a cluster's life you will see a steady increase in disk usage and the number of ranges in the cluster even if you aren't writing data to it yourself. - -As of the 1.0 release, there is no way to change the number of days before timeseries data gets truncated. As a workaround, however, you can start each node with the `COCKROACH_METRICS_SAMPLE_INTERVAL` environment variable set higher than its default of `10s` to store fewer data points. For example, you could set it to `1m` to only collect data every 1 minute, which would result in storing 6x less timeseries data than the default setting. - -## Why does CockroachDB collect anonymized cluster usage details by default? - -Collecting information about CockroachDB's real world usage helps us prioritize the development of product features. We choose our default as "opt-in" to strengthen the information we receive from our collection efforts, but we also make a careful effort to send only anonymous, aggregate usage statistics. See [Diagnostics Reporting](diagnostics-reporting.html) for a detailed look at what information is sent and how to opt-out. - -## What happens when node clocks are not properly synchronized? - -CockroachDB needs moderately accurate time to preserve data consistency, so it's important to run [NTP](http://www.ntp.org/) or other clock synchronization software on each node. - -By default, CockroachDB's maximum allowed clock offset is 500ms. When a node detects that its clock offset, relative to other nodes, is half or more of the maximum allowed, it spontaneously shuts down. While [serializable consistency](https://en.wikipedia.org/wiki/Serializability) is maintained regardless of clock skew, skew outside the configured clock offset bounds can result in violations of single-key linearizability between causally dependent transactions. 
With NTP or other clock synchronization software running on each node, there's very little risk of ever exceeding the maximum offset and encountering such anomalies, and even on well-functioning hardware not running synchronization software, slow clock drift is most common, which CockroachDB handles safely. - -The one rare case to note is when a node's clock suddenly jumps beyond the maximum offset before the node detects it. Although extremely unlikely, this could occur, for example, when running CockroachDB inside a VM and the VM hypervisor decides to migrate the VM to different hardware with a different time. In this case, there can be a small window of time between when the node's clock becomes unsynchronized and when the node spontaneously shuts down. During this window, it would be possible for a client to read stale data and write data derived from stale reads. - -## See Also - -- [Product FAQs](frequently-asked-questions.html) -- [SQL FAQs](sql-faqs.html) diff --git a/src/current/v1.0/orchestrate-cockroachdb-with-docker-swarm-insecure.md b/src/current/v1.0/orchestrate-cockroachdb-with-docker-swarm-insecure.md deleted file mode 100644 index fa2baebd009..00000000000 --- a/src/current/v1.0/orchestrate-cockroachdb-with-docker-swarm-insecure.md +++ /dev/null @@ -1,347 +0,0 @@ ---- -title: Orchestrate CockroachDB with Docker Swarm -summary: How to orchestrate the deployment and management of an insecure three-node CockroachDB cluster as a Docker swarm. -toc: true ---- - - - -This page shows you how to orchestrate the deployment and management of an insecure three-node CockroachDB cluster as a [swarm of Docker Engines](https://docs.docker.com/engine/swarm/). - -If you plan to use CockroachDB in production, we recommend using a secure cluster instead. Select **Secure** above for instructions. - - -## Before You Begin - -Before you begin, it's helpful to review some terminology: - -Feature | Description ---------|------------ -instance | A physical or virtual machine. In this tutorial, you'll use three, one per CockroachDB node. -[Docker Engine](https://docs.docker.com/engine/) | This is the core Docker application that creates and runs containers. In this tutorial, you'll install and start Docker Engine on each of your three instances. -[swarm](https://docs.docker.com/engine/swarm/key-concepts/#/swarm) | A swarm is a group of Docker Engines joined into a single, virtual host. -[swarm node](https://docs.docker.com/engine/swarm/how-swarm-mode-works/nodes/) | Each member of a swarm is considered a node. In this tutorial, each instance will be a swarm node, one as the master node and the two others as worker nodes. You'll submit service definitions to the master node, which will dispatch work to the worker nodes. -[service](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) | A service is the definition of the tasks to execute on swarm nodes. In this tutorial, you'll define three services, each starting a CockroachDB node inside a container and joining it into a single cluster. Each service also ensures a stable network identity on restart via a resolvable DNS name. -[overlay network](https://docs.docker.com/engine/userguide/networking/#/an-overlay-network-with-docker-engine-swarm-mode) | An overlay network enables communication between the nodes of a swarm. In this tutorial, you'll create an overlay network and use it in each of your services. - -## Step 1. Create instances - -Create three instances, one for each node of your cluster. 
- -- For GCE-specific instructions, read through step 2 of [Deploy CockroachDB on GCE](deploy-cockroachdb-on-google-cloud-platform-insecure.html). -- For AWS-specific instructions, read through step 2 of [Deploy CockroachDB on AWS](deploy-cockroachdb-on-aws-insecure.html). - -Be sure to configure your network to allow TCP communication on these ports: - -- `26257` for inter-node communication (i.e., working as a cluster) and connecting with applications -- `8080` for exposing your Admin UI - -## Step 2. Install Docker Engine - -On each instance: - -1. [Install and start Docker Engine](https://docs.docker.com/engine/installation/). - -2. Confirm that the Docker daemon is running in the background: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker version - ~~~ - -## Step 3. Start the swarm - -1. On the instance where you want to run your manager node, [initialize the swarm](https://docs.docker.com/engine/swarm/swarm-tutorial/create-swarm/). - - Take note of the output for `docker swarm init` as it includes the command you'll use in the next step. It should look like this: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker swarm init --advertise-addr 10.142.0.2 - ~~~ - - ~~~ - Swarm initialized: current node (414z67gr5cgfalm4uriu4qdtm) is now a manager. - - To add a worker to this swarm, run the following command: - - $ docker swarm join \ - --token SWMTKN-1-5vwxyi6zl3cc62lqlhi1jrweyspi8wblh2i3qa7kv277fgy74n-e5eg5c7ioxypjxlt3rpqorh15 \ - 10.142.0.2:2377 - - To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. - ~~~ - -2. On the other two instances, [create a worker node joined to the swarm](https://docs.docker.com/engine/swarm/swarm-tutorial/add-nodes/) by running the `docker swarm join` command in the output from step 1, for example: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker swarm join \ - --token SWMTKN-1-5vwxyi6zl3cc62lqlhi1jrweyspi8wblh2i3qa7kv277fgy74n-e5eg5c7ioxypjxlt3rpqorh15 \ - 10.142.0.2:2377 - ~~~ - - ~~~ - This node joined a swarm as a worker. - ~~~ - -3. On the instance running your manager node, verify that your swarm is running: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker node ls - ~~~ - - ~~~ - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - 414z67gr5cgfalm4uriu4qdtm * instance-1 Ready Active Leader - ae144s35dx1p1lcegh6bblyed instance-2 Ready Active - aivjg2joxyvzvbksjsix27khy instance-3 Ready Active - ~~~ - -## Step 4. Create an overlay network - -On the instance running your manager node, create an overlay network so that the containers in your swarm can talk to each other: - -{% include copy-clipboard.html %} -~~~ shell -$ sudo docker network create --driver overlay cockroachdb -~~~ - -## Step 5. Start the CockroachDB cluster - -1. On the instance running your manager node, create the first service that the others will join to: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker service create \ - --replicas 1 \ - --name cockroachdb-1 \ - --hostname cockroachdb-1 \ - --network cockroachdb \ - --mount type=volume,source=cockroachdb-1,target=/cockroach/cockroach-data,volume-driver=local \ - --stop-grace-period 60s \ - --publish 8080:8080 \ - cockroachdb/cockroach:{{page.release_info.version}} start \ - --logtostderr \ - --insecure - ~~~ - - This command creates a service that starts a container, joins it to the overlay network, and starts the first CockroachDB node inside the container mounted to a local volume for persistent storage. 
Let's look at each part: - - `sudo docker service create`: The Docker command to create a new service. - - `--replicas`: The number of containers controlled by the service. Since each service will control one container running one CockroachDB node, this will always be `1`. - - `--name`: The name for the service. - - `--hostname`: The hostname of the container. It will listen for connections on this address. - - `--network`: The overlay network for the container to join. See [Step 4. Create an overlay network](#step-4-create-an-overlay-network) for more details. - - `--mount`: This flag mounts a local volume called `cockroachdb-1`. This means that data and logs for the node running in this container will be stored in `/cockroach/cockroach-data` on the instance and will be reused on restart as long as restart happens on the same instance, which is not guaranteed. - {{site.data.alerts.callout_info}}If you plan on replacing or adding instances, it's recommended to use remote storage instead of local disk. To do so, create a remote volume for each CockroachDB instance using the volume driver of your choice, and then specify that volume driver instead of the volume-driver=local part of the command above, e.g., volume-driver=gce if using the GCE volume driver.{{site.data.alerts.end}} - - `--stop-grace-period`: This flag sets a grace period to give CockroachDB enough time to shut down gracefully, when possible. - - `--publish`: This flag makes the Admin UI accessible at the IP of any instance running a swarm node on port `8080`. Note that, even though this flag is defined only in the first node's service, the swarm exposes this port on every swarm node using a routing mesh. See [Publishing ports](https://docs.docker.com/engine/swarm/services/#publish-ports) for more details. - - `cockroachdb/cockroach:{{page.release_info.version}} start ...`: The CockroachDB command to [start a node](start-a-node.html) in the container in insecure mode and instruct other cluster members to talk to it using its persistent network address, `cockroachdb-1`. - -2. On the same instance, create the services to start two other CockroachDB nodes and join them to the cluster: - - {% include copy-clipboard.html %} - ~~~ shell - # Start the second service: - $ sudo docker service create \ - --replicas 1 \ - --name cockroachdb-2 \ - --hostname cockroachdb-2 \ - --network cockroachdb \ - --mount type=volume,source=cockroachdb-2,target=/cockroach/cockroach-data,volume-driver=local \ - --stop-grace-period 60s \ - cockroachdb/cockroach:{{page.release_info.version}} start \ - --join=cockroachdb-1:26257 \ - --logtostderr \ - --insecure - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Start the third service: - $ sudo docker service create \ - --replicas 1 \ - --name cockroachdb-3 \ - --hostname cockroachdb-3 \ - --network cockroachdb \ - --mount type=volume,source=cockroachdb-3,target=/cockroach/cockroach-data,volume-driver=local \ - --stop-grace-period 60s \ - cockroachdb/cockroach:{{page.release_info.version}} start \ - --join=cockroachdb-1:26257 \ - --logtostderr \ - --insecure - ~~~ - - There are only a few differences when creating the second two services: - - The `--name` is unique for each service. - - The CockroachDB command to [`start`](start-a-node.html) each node uses the `--join` flag to connect it to the cluster via the name of the first service and its default port, `cockroachdb-1:26257`. - -3.
Verify that all three services were created successfully: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker service ls - ~~~ - - ~~~ - ID NAME MODE REPLICAS IMAGE - a6g0ur6857j6 cockroachdb-1 replicated 1/1 cockroachdb/cockroach:{{page.release_info.version}} - dr81a756gaa6 cockroachdb-2 replicated 1/1 cockroachdb/cockroach:{{page.release_info.version}} - il4m7op1afg9 cockroachdb-3 replicated 1/1 cockroachdb/cockroach:{{page.release_info.version}} - ~~~ - - {{site.data.alerts.callout_success}}The service definitions tell the CockroachDB nodes to log to stderr, so if you ever need access to a node's logs for troubleshooting, use sudo docker logs <container id> from the instance on which the container is running.{{site.data.alerts.end}} - -4. Remove the first service and recreate it again with the `--join` flag to ensure that, if the first node restarts, it will rejoin the original cluster via the second service, `cockroachdb-2`, instead of initiating a new cluster: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker service rm cockroachdb-1 - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker service create \ - --replicas 1 \ - --name cockroachdb-1 \ - --hostname cockroachdb-1 \ - --network cockroachdb \ - --mount type=volume,source=cockroachdb-1,target=/cockroach/cockroach-data,volume-driver=local \ - --stop-grace-period 60s \ - --publish 8080:8080 \ - cockroachdb/cockroach:{{page.release_info.version}} start \ - --join=cockroachdb-2:26257 \ - --logtostderr \ - --insecure - ~~~ - -## Step 6. Use the built-in SQL client - -1. On any instance, use the `sudo docker ps` command to get the ID of the container running the CockroachDB node: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker ps | grep cockroachdb - ~~~ - - ~~~ - 9539871cc769 cockroachdb/cockroach:{{page.release_info.version}} "/cockroach/cockroach" 2 minutes ago Up 2 minutes 8080/tcp, 26257/tcp cockroachdb-1.1.0wigdh8lx0ylhuzm4on9bbldq - ~~~ - -2. Use the `sudo docker exec` command to open the built-in SQL shell in interactive mode inside the container: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker exec -it 9539871cc769 ./cockroach sql --insecure - ~~~ - -3. Create an `insecurenodetest` database: - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE DATABASE insecurenodetest; - ~~~ - -4. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 7. Monitor the cluster - -To view your cluster's Admin UI, open a browser and go to `http://:8080`. - -{{site.data.alerts.callout_info}}It's possible to access the Admin UI from outside of the swarm because you published port 8080 externally in the first node's service definition.{{site.data.alerts.end}} - -On this page, verify that the cluster is running as expected: - -1. Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - -2. Click the **Databases** tab on the left to verify that `insecurenodetest` is listed. - -## Step 8. Simulate node failure - -Since we have three service definitions, one for each node, Docker Swarm will ensure that there are three nodes running at all times. If a node fails, Docker Swarm will automatically create another node with the same network identity and storage. - -To see this in action: - -1. 
On any instance, use the `sudo docker ps` command to get the ID of the container running the CockroachDB node: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker ps | grep cockroachdb - ~~~ - - ~~~ - 9539871cc769 cockroachdb/cockroach:{{page.release_info.version}} "/cockroach/cockroach" 10 minutes ago Up 10 minutes 8080/tcp, 26257/tcp cockroachdb-1.1.0wigdh8lx0ylhuzm4on9bbldq - ~~~ - -2. Use `sudo docker kill` to remove the container, which implicitly stops the node: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker kill 9539871cc769 - ~~~ - -3. Verify that the node was restarted in a new container: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker ps | grep cockroachdb - ~~~ - - ~~~ - 4a58f86e3ced cockroachdb/cockroach:{{page.release_info.version}} "/cockroach/cockroach" 7 seconds ago Up 1 seconds 8080/tcp, 26257/tcp cockroachdb-1.1.cph86kmhhcp8xzq6a1nxtk9ng - ~~~ - -4. Back in the Admin UI, click **View nodes list** on the right and verify that all 3 nodes are live. - -## Step 9. Scale the cluster - -To increase the number of nodes in your CockroachDB cluster: - -1. Create an additional instance (see [Step 1](#step-1-create-instances)). -2. Install Docker Engine on the instance (see [Step 2](#step-2-install-docker-engine)). -3. Join the instance to the swarm as a worker node (see [Step 3.2](#step-3-start-the-swarm)). -4. Create a new service to start another node and join it to the CockroachDB cluster (see [Step 5.2](#step-5-start-the-cockroachdb-cluster)). - -## Step 10. Stop the cluster - -To stop the CockroachDB cluster, on the instance running your manager node, remove the services: - -{% include copy-clipboard.html %} -~~~ shell -$ sudo docker service rm cockroachdb-1 cockroachdb-2 cockroachdb-3 -~~~ - -~~~ -cockroachdb-1 -cockroachdb-2 -cockroachdb-3 -~~~ - -You may want to remove the persistent volumes used by the services as well. To do this, on each instance: - -{% include copy-clipboard.html %} -~~~ shell -# Identify the name of the local volume: -$ sudo docker volume ls -~~~ - -~~~ -cockroachdb-1 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -# Remove the local volume: -$ sudo docker volume rm cockroachdb-1 -~~~ - -## See Also - -- [Orchestrate CockroachDB with Kubernetes](orchestrate-cockroachdb-with-kubernetes.html) -- [Cloud Deployment](cloud-deployment.html) -- [Manual Deployment](manual-deployment.html) -- [Local Deployment](start-a-local-cluster.html) diff --git a/src/current/v1.0/orchestrate-cockroachdb-with-docker-swarm.md b/src/current/v1.0/orchestrate-cockroachdb-with-docker-swarm.md deleted file mode 100644 index cda7213a79d..00000000000 --- a/src/current/v1.0/orchestrate-cockroachdb-with-docker-swarm.md +++ /dev/null @@ -1,586 +0,0 @@ ---- -title: Orchestrate CockroachDB with Docker Swarm -summary: How to orchestrate the deployment and management of a secure three-node CockroachDB cluster as a Docker swarm. -toc: true ---- -
    - - -
    - -This page shows you how to orchestrate the deployment and management of a secure three-node CockroachDB cluster as a [swarm of Docker Engines](https://docs.docker.com/engine/swarm/). - -If you are only testing CockroachDB, or you are not concerned with protecting network communication with TLS encryption, you can use an insecure cluster instead. Select **Insecure** above for instructions. - - -## Before You Begin - -Before you begin, it's helpful to review some terminology: - -Feature | Description ---------|------------ -instance | A physical or virtual machine. In this tutorial, you'll use three, one per CockroachDB node. -[Docker Engine](https://docs.docker.com/engine/) | This is the core Docker application that creates and runs containers. In this tutorial, you'll install and start Docker Engine on each of your three instances. -[swarm](https://docs.docker.com/engine/swarm/key-concepts/#/swarm) | A swarm is a group of Docker Engines joined into a single, virtual host. -[swarm node](https://docs.docker.com/engine/swarm/how-swarm-mode-works/nodes/) | Each member of a swarm is considered a node. In this tutorial, each instance will be a swarm node, one as the master node and the two others as worker nodes. You'll submit service definitions to the master node, which will dispatch work to the worker nodes. -[service](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) | A service is the definition of the tasks to execute on swarm nodes. In this tutorial, you'll define three services, each starting a CockroachDB node inside a container and joining it into a single cluster. Each service also ensures a stable network identity on restart via a resolvable DNS name. -[secret](https://docs.docker.com/engine/swarm/secrets/) | A secret is Docker's mechanism for managing sensitive data that a container needs at runtime. Since CockroachDB uses TLS certificates to authenticate and encrypt inter-node and client/node communication, you'll create a secret per certificate and use the secrets in your services. -[overlay network](https://docs.docker.com/engine/userguide/networking/#/an-overlay-network-with-docker-engine-swarm-mode) | An overlay network enables communication between the nodes of a swarm. In this tutorial, you'll create an overlay network and use it in each of your services. - -## Step 1. Create instances - -Create three instances, one for each node of your cluster. - -- For GCE-specific instructions, read through step 2 of [Deploy CockroachDB on GCE](deploy-cockroachdb-on-google-cloud-platform-insecure.html). -- For AWS-specific instructions, read through step 2 of [Deploy CockroachDB on AWS](deploy-cockroachdb-on-aws-insecure.html). - -Be sure to configure your network to allow TCP communication on these ports: - -- `26257` for inter-node communication (i.e., working as a cluster) and connecting with applications -- `8080` for exposing your Admin UI - -## Step 2. Install Docker Engine - -On each instance: - -1. [Install and start Docker Engine](https://docs.docker.com/engine/installation/). - -2. Confirm that the Docker daemon is running in the background: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker version - ~~~ - -## Step 3. Start the swarm - -1. On the instance where you want to run your manager node, [initialize the swarm](https://docs.docker.com/engine/swarm/swarm-tutorial/create-swarm/). - - Take note of the output for `docker swarm init` as it includes the command you'll use in the next step. 
It should look like this: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker swarm init --advertise-addr 10.142.0.2 - ~~~ - - ~~~ - Swarm initialized: current node (414z67gr5cgfalm4uriu4qdtm) is now a manager. - - To add a worker to this swarm, run the following command: - - $ docker swarm join \ - --token SWMTKN-1-5vwxyi6zl3cc62lqlhi1jrweyspi8wblh2i3qa7kv277fgy74n-e5eg5c7ioxypjxlt3rpqorh15 \ - 10.142.0.2:2377 - - To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. - ~~~ - -2. On the other two instances, [create a worker node joined to the swarm](https://docs.docker.com/engine/swarm/swarm-tutorial/add-nodes/) by running the `docker swarm join` command in the output from step 1, for example: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker swarm join \ - --token SWMTKN-1-5vwxyi6zl3cc62lqlhi1jrweyspi8wblh2i3qa7kv277fgy74n-e5eg5c7ioxypjxlt3rpqorh15 \ - 10.142.0.2:2377 - ~~~ - - ~~~ - This node joined a swarm as a worker. - ~~~ - -3. On the instance running your manager node, verify that your swarm is running: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker node ls - ~~~ - - ~~~ - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - 414z67gr5cgfalm4uriu4qdtm * instance-1 Ready Active Leader - ae144s35dx1p1lcegh6bblyed instance-2 Ready Active - aivjg2joxyvzvbksjsix27khy instance-3 Ready Active - ~~~ - -## Step 4. Create an overlay network - -On the instance running your manager node, create an overlay network so that the containers in your swarm can talk to each other: - -{% include copy-clipboard.html %} -~~~ shell -$ sudo docker network create --driver overlay cockroachdb -~~~ - -## Step 5. Create security resources - -A secure CockroachDB cluster uses TLS certificates for encrypted inter-node and client/node authentication and communication. In this step, you'll install CockroachDB on the instance running your manager node, use the [`cockroach cert`](create-security-certificates.html) command to generate certificate authority (CA), node, and client certificate and key pairs, and use the [`docker secret create`](https://docs.docker.com/engine/reference/commandline/secret_create/) command to assign these files to Docker [secrets](https://docs.docker.com/engine/swarm/secrets/) for use by your Docker services. - -1. On the instance running your manager node, install CockroachDB from our latest binary: - - {% include copy-clipboard.html %} - ~~~ shell - # Get the latest CockroachDB tarball: - $ curl -O https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Extract the binary: - $ tar -xzf cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ - --strip=1 cockroach-{{ page.release_info.version }}.linux-amd64/cockroach - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Move the binary: - $ sudo mv cockroach /usr/local/bin/ - ~~~ - -2. Create a `certs` directory and a safe directory to keep your CA key: - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir certs - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ mkdir my-safe-directory - ~~~ - -3. Create the CA certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-ca \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ ls certs - ~~~ - - ~~~ - ca.crt - ~~~ - -4.
Create a Docker secret for the `ca.crt` file using the [`docker secret create`](https://docs.docker.com/engine/reference/commandline/secret_create/) command: - - {{site.data.alerts.callout_danger}}Store the ca.key file somewhere safe and keep a backup; if you lose it, you will not be able to add new nodes or clients to your cluster.{{site.data.alerts.end}} - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create ca-crt certs/ca.crt - ~~~ - - This command assigns a name to the secret (`ca-crt`) and identifies the location of the cockroach-generated CA certificate file. You can use a different secret name, if you like, but be sure to reference the correct name when starting the CockroachDB nodes in the next step. - -5. Create the certificate and key for the first node: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node \ - cockroachdb-1 \ - localhost \ - 127.0.0.1 \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ ls certs - ~~~ - - ~~~ - ca.crt - node.crt - node.key - ~~~ - - This command issues the certificate/key pair to the service name you will use for the node later (`cockroachdb-1`) as well as to local addresses that will make it easier to run the built-in SQL shell and other CockroachDB client commands in the same container as the node. - -6. Create Docker secrets for the first node's certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create cockroachdb-1-crt certs/node.crt - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create cockroachdb-1-key certs/node.key - ~~~ - - Again, these commands assign names to the secrets (`cockroachdb-1-crt` and `cockroachdb-1-key`) and identify the location of the cockroach-generated certificate and key files. - -7. Create the certificate and key for the second node, using the `--overwrite` flag to replace the files created for the first node: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node --overwrite \ - cockroachdb-2 \ - localhost \ - 127.0.0.1 \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ ls certs - ~~~ - - ~~~ - ca.crt - node.crt - node.key - ~~~ - -8. Create Docker secrets for the second node's certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create cockroachdb-2-crt certs/node.crt - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create cockroachdb-2-key certs/node.key - ~~~ - -9. Create the certificate and key for the third node, again using the `--overwrite` flag to replace the files created for the second node: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-node --overwrite \ - cockroachdb-3 \ - localhost \ - 127.0.0.1 \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ ls certs - ~~~ - - ~~~ - ca.crt - node.crt - node.key - ~~~ - -10. Create Docker secrets for the third node's certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create cockroachdb-3-crt certs/node.crt - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create cockroachdb-3-key certs/node.key - ~~~ - -11. 
Create a client certificate and key for the `root` user: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach cert create-client \ - root \ - --certs-dir=certs \ - --ca-key=my-safe-directory/ca.key - ~~~ - -12. Create Docker secrets for the `root` user's certificate and key: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create cockroachdb-root-crt certs/client.root.crt - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker secret create cockroachdb-root-key certs/client.root.key - ~~~ - -## Step 6. Start the CockroachDB cluster - -1. On the instance running your manager node, create the first service that the others will join to: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker service create \ - --replicas 1 \ - --name cockroachdb-1 \ - --hostname cockroachdb-1 \ - --network cockroachdb \ - --mount type=volume,source=cockroachdb-1,target=/cockroach/cockroach-data,volume-driver=local \ - --stop-grace-period 60s \ - --publish 8080:8080 \ - --secret source=ca-crt,target=ca.crt \ - --secret source=cockroachdb-1-crt,target=node.crt \ - --secret source=cockroachdb-1-key,target=node.key,mode=0600 \ - --secret source=cockroachdb-root-crt,target=client.root.crt \ - --secret source=cockroachdb-root-key,target=client.root.key,mode=0600 \ - cockroachdb/cockroach:{{page.release_info.version}} start \ - --logtostderr \ - --certs-dir=/run/secrets - ~~~ - - This command creates a service that starts a container securely, joins it to the overlay network, and starts the first CockroachDB node inside the container mounted to a local volume for persistent storage. Let's look at each part: - - `sudo docker service create`: The Docker command to create a new service. - - `--replicas`: The number of containers controlled by the service. Since each service will control one container running one CockroachDB node, this will always be `1`. - - `--name`: The name for the service. - - `--hostname`: The hostname of the container. It will listen for connections on this address. - - `--network`: The overlay network for the container to join. See [Step 4. Create an overlay network](#step-4-create-an-overlay-network) for more details. - - `--mount`: This flag mounts a local volume called `cockroachdb-1`. This means that data and logs for the node running in this container will be stored in `/cockroach/cockroach-data` on the instance and will be reused on restart as long as restart happens on the same instance, which is not guaranteed. - {{site.data.alerts.callout_info}}If you plan on replacing or adding instances, it's recommended to use remote storage instead of local disk. To do so, create a remote volume for each CockroachDB instance using the volume driver of your choice, and then specify that volume driver instead of the volume-driver=local part of the command above, e.g., volume-driver=gce if using the GCE volume driver.{{site.data.alerts.end}} - - `--stop-grace-period`: This flag sets a grace period to give CockroachDB enough time to shut down gracefully, when possible. - - `--publish`: This flag makes the Admin UI accessible at the IP of any instance running a swarm node on port `8080`. Note that, even though this flag is defined only in the first node's service, the swarm exposes this port on every swarm node using a routing mesh. See [Publishing ports](https://docs.docker.com/engine/swarm/services/#publish-ports) for more details. - - `--secret`: These flags identify the secrets to use in securing the node. 
They must reference the secret names defined in step 5. For the node and client certificate and key secrets, the `source` field identifies the relevant secret, and the `target` field defines the name to be used in `cockroach start` and `cockroach sql` flags. For the node and client key secrets, the `mode` field also sets the file permissions to `0600`; if this isn't set, Docker will assign a default file permission of `0444`, which will not work with CockroachDB's built-in SQL client. - - `cockroachdb/cockroach:{{page.release_info.version}} start ...`: The CockroachDB command to [start a node](start-a-node.html) in the container, instruct other cluster members to talk to it using its persistent network address, `cockroachdb-1`, and to use the relevant Docker secrets to authenticate and encrypt communication. - -2. On the same instance, create the services to start two other CockroachDB nodes and join them to the cluster: - - {% include copy-clipboard.html %} - ~~~ shell - # Create the second service: - $ sudo docker service create \ - --replicas 1 \ - --name cockroachdb-2 \ - --hostname cockroachdb-2 \ - --network cockroachdb \ - --stop-grace-period 60s \ - --mount type=volume,source=cockroachdb-2,target=/cockroach/cockroach-data,volume-driver=local \ - --secret source=ca-crt,target=ca.crt \ - --secret source=cockroachdb-2-crt,target=node.crt \ - --secret source=cockroachdb-2-key,target=node.key,mode=0600 \ - --secret source=cockroachdb-root-crt,target=client.root.crt \ - --secret source=cockroachdb-root-key,target=client.root.key,mode=0600 \ - cockroachdb/cockroach:{{page.release_info.version}} start \ - --logtostderr \ - --certs-dir=/run/secrets \ - --join=cockroachdb-1:26257 - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - # Create the third service: - $ sudo docker service create \ - --replicas 1 \ - --name cockroachdb-3 \ - --hostname cockroachdb-3 \ - --network cockroachdb \ - --mount type=volume,source=cockroachdb-3,target=/cockroach/cockroach-data,volume-driver=local \ - --stop-grace-period 60s \ - --secret source=ca-crt,target=ca.crt \ - --secret source=cockroachdb-3-crt,target=node.crt \ - --secret source=cockroachdb-3-key,target=node.key,mode=0600 \ - --secret source=cockroachdb-root-crt,target=client.root.crt \ - --secret source=cockroachdb-root-key,target=client.root.key,mode=0600 \ - cockroachdb/cockroach:{{page.release_info.version}} start \ - --logtostderr \ - --certs-dir=/run/secrets \ - --join=cockroachdb-1:26257 - ~~~ - - There are only a few differences when creating the second two services: - - The `--name` and `--secret` flags are unique for each service. - - The CockroachDB command to [`start`](start-a-node.html) each node uses the `--join` flag to connect it to the cluster via the name of the first service and its default port, `cockroachdb-1:26257`. - -3. 
Verify that all three services were created successfully: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker service ls - ~~~ - - ~~~ - ID NAME MODE REPLICAS IMAGE - a6g0ur6857j6 cockroachdb-1 replicated 1/1 cockroachdb/cockroach:{{page.release_info.version}} - dr81a756gaa6 cockroachdb-2 replicated 1/1 cockroachdb/cockroach:{{page.release_info.version}} - il4m7op1afg9 cockroachdb-3 replicated 1/1 cockroachdb/cockroach:{{page.release_info.version}} - ~~~ - - {{site.data.alerts.callout_success}}The service definitions tell the CockroachDB nodes to log to stderr, so if you ever need access to a node's logs for troubleshooting, use sudo docker logs <container id> from the instance on which the container is running.{{site.data.alerts.end}} - -4. Remove the first service and recreate it again with the `--join` flag to ensure that, if the first node restarts, it will rejoin the original cluster via the second service, `cockroachdb-2`, instead of initiating a new cluster: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker service rm cockroachdb-1 - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker service create \ - --replicas 1 \ - --name cockroachdb-1 \ - --hostname cockroachdb-1 \ - --network cockroachdb \ - --mount type=volume,source=cockroachdb-1,target=/cockroach/cockroach-data,volume-driver=local \ - --stop-grace-period 60s \ - --publish 8080:8080 \ - --secret source=ca-crt,target=ca.crt \ - --secret source=cockroachdb-1-crt,target=node.crt \ - --secret source=cockroachdb-1-key,target=node.key,mode=0600 \ - --secret source=cockroachdb-root-crt,target=client.root.crt \ - --secret source=cockroachdb-root-key,target=client.root.key,mode=0600 \ - cockroachdb/cockroach:{{page.release_info.version}} start \ - --logtostderr \ - --certs-dir=/run/secrets \ - --join=cockroachdb-2:26257 - ~~~ - -## Step 7. Use the built-in SQL client - -1. On any instance, use the `docker ps` command to get the ID of the container running the CockroachDB node: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker ps | grep cockroachdb - ~~~ - - ~~~ shell - 9539871cc769 cockroachdb/cockroach:{{page.release_info.version}} "/cockroach/cockroach" 2 minutes ago Up 2 minutes 8080/tcp, 26257/tcp cockroachdb-1.1.0wigdh8lx0ylhuzm4on9bbldq - ~~~ - -2. Use the `docker exec` command to open the built-in SQL shell in interactive mode inside the container: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker exec -it \ - ./cockroach sql \ - --certs-dir=/run/secrets - ~~~ - - Because we included the `root` user's client certificate and key in the nodes' service definitions, we can use the client cert and key to start the [built-in SQL client](use-the-built-in-sql-client.html) securely in the same container as a node. - -3. Create a `securenodetest` database: - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE DATABASE securenodetest; - ~~~ - -4. Use **CTRL-D**, **CTRL-C**, or `\q` to exit the SQL shell. - -## Step 8. Monitor the cluster - -To view your cluster's Admin UI, open a browser and go to `https://:8080`. - -{{site.data.alerts.callout_info}}It's possible to access the Admin UI from outside of the swarm because you published port 8080 externally in the first node's service definition. However, your browser will consider the CockroachDB-created certificate invalid, so you’ll need to click through a warning message to get to the UI.{{site.data.alerts.end}} - -On this page, verify that the cluster is running as expected: - -1. 
Click **View nodes list** on the right to ensure that all of your nodes successfully joined the cluster. - -2. Click the **Databases** tab on the left to verify that `securenodetest` is listed. - -## Step 9. Simulate node failure - -Since we have three service definitions, one for each node, Docker Swarm will ensure that there are three nodes running at all times. If a node fails, Docker Swarm will automatically create another node with the same network identity and storage. - -To see this in action: - -1. On any instance, use the `sudo docker ps` command to get the ID of the container running the CockroachDB node: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker ps | grep cockroachdb - ~~~ - - ~~~ - 32769a6dd664 cockroachdb/cockroach:{{page.release_info.version}} "/cockroach/cockroach" 10 minutes ago Up 10 minutes 8080/tcp, 26257/tcp cockroachdb-2.1.0wigdh8lx0ylhuzm4on9bbldq - ~~~ - -2. Use `sudo docker kill` to remove the container, which implicitly stops the node: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker kill 32769a6dd664 - ~~~ - -3. Verify that the node was restarted in a new container: - - {% include copy-clipboard.html %} - ~~~ shell - $ sudo docker ps | grep cockroachdb - ~~~ - - ~~~ - 4a58f86e3ced cockroachdb/cockroach:{{page.release_info.version}} "/cockroach/cockroach" 7 seconds ago Up 1 seconds 8080/tcp, 26257/tcp cockroachdb-2.1.cph86kmhhcp8xzq6a1nxtk9ng - ~~~ - -4. Back in the Admin UI, click **View nodes list** on the right and verify that all 3 nodes are live. - -## Step 10. Scale the cluster - -To increase the number of nodes in your CockroachDB cluster: - -1. Create an additional instance (see [Step 1](#step-1-create-instances)). -2. Install Docker Engine on the instance (see [Step 2](#step-2-install-docker-engine)). -3. Join the instance to the swarm as a worker node (see [Step 3.2](#step-3-start-the-swarm)). -4. Create security resources for the node (see [Step 5.7 and 5.8](#step-5-create-security-resources)). -5. Create a new service to start another node and join it to the CockroachDB cluster (see [Step 6.2](#step-6-start-the-cockroachdb-cluster)). - -## Step 11. Stop the cluster - -To stop the CockroachDB cluster, on the instance running your manager node, remove the services: - -{% include copy-clipboard.html %} -~~~ shell -$ sudo docker service rm cockroachdb-1 cockroachdb-2 cockroachdb-3 -~~~ - -~~~ -cockroachdb-1 -cockroachdb-2 -cockroachdb-3 -~~~ - -You may want to remove the persistent volumes and secrets used by the services as well.
To do this, on each instance: - -{% include copy-clipboard.html %} -~~~ shell -# Identify the name of the local volume: -$ sudo docker volume ls -~~~ - -~~~ -cockroachdb-1 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -# Remove the local volume: -$ sudo docker volume rm cockroachdb-1 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -# Identify the names of the secrets: -$ sudo docker secret ls -~~~ - -~~~ -ca-crt -cockroachdb-1-crt -cockroachdb-1-key -~~~ - -{% include copy-clipboard.html %} -~~~ shell -# Remove the secrets: -$ sudo docker secret rm ca-crt cockroachdb-1-crt cockroachdb-1-key -~~~ - -## See Also - -- [Orchestrate CockroachDB with Kubernetes](orchestrate-cockroachdb-with-kubernetes.html) -- [Cloud Deployment](cloud-deployment.html) -- [Manual Deployment](manual-deployment.html) -- [Local Deployment](start-a-local-cluster.html) diff --git a/src/current/v1.0/orchestrate-cockroachdb-with-kubernetes.md b/src/current/v1.0/orchestrate-cockroachdb-with-kubernetes.md deleted file mode 100644 index 87a9ba9c49d..00000000000 --- a/src/current/v1.0/orchestrate-cockroachdb-with-kubernetes.md +++ /dev/null @@ -1,366 +0,0 @@ ---- -title: Orchestrate CockroachDB with Kubernetes -summary: How to orchestrate the deployment and management of an insecure 3-node CockroachDB cluster with Kubernetes. -toc: true -canonical: /stable/deploy-cockroachdb-with-kubernetes ---- - -This page shows you how to orchestrate the deployment and management of an insecure 3-node CockroachDB cluster with [Kubernetes](http://kubernetes.io/), using the [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) feature. - -{{site.data.alerts.callout_danger}}Deploying an insecure cluster is not recommended for data in production. We'll update this page after improving the process to deploy secure clusters.{{site.data.alerts.end}} - - -## Step 1. Choose your deployment environment - -Choose the environment where you will run CockroachDB with Kubernetes. The instructions below will adjust based on your choice. - -
    - - -

    - -It might also be helpful to review some Kubernetes-specific terminology: - -
    - -Feature | Description ---------|------------ -instance | A physical or virtual machine. In this tutorial, you'll run a Kubernetes script from your local workstation that will create 4 GCE or AWS instances and join them into a single Kubernetes cluster. -[pod](http://kubernetes.io/docs/user-guide/pods/) | A pod is a group of one or more Docker containers. In this tutorial, each pod will run on a separate instance and contain one Docker container running a single CockroachDB node. You'll start with 3 pods and grow to 4. -[StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) | A StatefulSet is a group of pods treated as stateful units, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart. StatefulSets are considered stable as of Kubernetes version 1.9 after reaching beta in version 1.5. -[persistent volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) | A persistent volume is a piece of networked storage (Persistent Disk on GCE, Elastic Block Store on AWS) mounted into a pod. The lifetime of a persistent volume is decoupled from the lifetime of the pod that's using it, ensuring that each CockroachDB node binds back to the same storage on restart.

    This tutorial assumes that dynamic volume provisioning is available. When that is not the case, [persistent volume claims](http://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims) need to be created manually. - -
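If dynamic provisioning is not available, the claims made by the StatefulSet can only bind once matching `PersistentVolume` objects exist, so you would pre-create one volume per CockroachDB pod. The following is a minimal sketch of such a volume and is not part of the original tutorial: the name `manual-pv-0`, the `hostPath` backing, and the path are placeholders for illustration only (on GCE or AWS you would reference a pre-provisioned `gcePersistentDisk` or `awsElasticBlockStore` volume instead), and the `1Gi` size matches the claims shown later in this tutorial.

{% include copy-clipboard.html %}
~~~ shell
# Pre-create one PersistentVolume per CockroachDB pod (hypothetical example):
$ cat <<EOF | kubectl create -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manual-pv-0
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /tmp/manual-pv-0
EOF
~~~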
    - -
    - -Feature | Description ---------|------------ -[minikube](http://kubernetes.io/docs/getting-started-guides/minikube/) | This is the tool you'll use to run a single-node Kubernetes cluster inside a VM on your computer. -[pod](http://kubernetes.io/docs/user-guide/pods/) | A pod is a group of one or more Docker containers. In this tutorial, each pod will contain one Docker container running a single CockroachDB node. You'll start with 3 pods and grow to 4. -[StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) | A StatefulSet is a group of pods treated as stateful units, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart. StatefulSets are considered stable as of Kubernetes version 1.9 after reaching beta in version 1.5. -[persistent volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) | A persistent volume is a piece of local storage mounted into a pod. The lifetime of a persistent volume is decoupled from the lifetime of the pod that's using it, ensuring that each CockroachDB node binds back to the same storage on restart.

    When using `minikube`, persistent volumes are external temporary directories that endure until they are manually deleted or until the entire Kubernetes cluster is deleted. -[persistent volume claim](http://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims) | When pods are created (one per CockroachDB node), each pod will request a persistent volume claim to “claim” durable storage for its node. - -
    - -## Step 2. Install and start Kubernetes - -
    - -From your local workstation, install prerequisites and start a Kubernetes cluster as described in the Kubernetes documentation: - -- For GCE-specific instructions, see [Running Kubernetes on Google Compute Engine](https://v1-18.docs.kubernetes.io/docs/setup/production-environment/turnkey/gce/). -- For AWS-specific instructions, see [Running Kubernetes on AWS EC2](https://v1-18.docs.kubernetes.io/docs/setup/production-environment/turnkey/aws/) - -The heart of this step is running a Kubernetes script that creates 4 GCE or AWS instances and joins them into a single Kubernetes cluster, all from your local workstation. You'll run subsequent steps from your local workstation as well. - -
    - -
    - -Follow Kubernetes' [documentation](http://kubernetes.io/docs/getting-started-guides/minikube/) to install `minikube` and `kubectl` for your OS. Then start a local Kubernetes cluster: - -{% include copy-clipboard.html %} -~~~ shell -$ minikube start -~~~ - -~~~ -Starting local Kubernetes cluster... -Kubectl is now configured to use the cluster. -~~~ - -
    - -## Step 3. Start the CockroachDB cluster - -
    - -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml - ~~~ - -2. Use the `kubectl get` command to verify that the persistent volumes and corresponding claims were created successfully: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl get persistentvolumes - ~~~ - - ~~~ - NAME                                       CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS    CLAIM                           REASON    AGE - pvc-52f51ecf-8bd5-11e6-a4f4-42010a800002   1Gi        RWO           Delete          Bound     default/datadir-cockroachdb-0             26s - pvc-52fd3a39-8bd5-11e6-a4f4-42010a800002   1Gi        RWO           Delete          Bound     default/datadir-cockroachdb-1             27s - pvc-5315efda-8bd5-11e6-a4f4-42010a800002   1Gi        RWO           Delete          Bound     default/datadir-cockroachdb-2             27s - ~~~ - -3. Wait a bit and then verify that three pods were created successfully. If you do not see three pods, wait longer and check again. - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl get pods - ~~~ - - ~~~ - NAME            READY     STATUS    RESTARTS   AGE - cockroachdb-0   1/1       Running   0          2m - cockroachdb-1   1/1       Running   0          2m - cockroachdb-2   1/1       Running   0          2m - ~~~ - -
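As an optional cross-check on step 2 above (not part of the original tutorial), you can also list the claims themselves. You should see three claims named `datadir-cockroachdb-0` through `datadir-cockroachdb-2`, matching the `CLAIM` column in the output above, each bound to one of the listed volumes:

{% include copy-clipboard.html %}
~~~ shell
# List the persistent volume claims created for the StatefulSet's pods:
$ kubectl get persistentvolumeclaims
~~~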
    - -
    - -1. Use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml - ~~~ - -2. Use the `kubectl get` command to verify that the persistent volumes and corresponding claims were created successfully: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl get persistentvolumes - ~~~ - - ~~~ - NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE - pv0 1Gi RWO Bound default/datadir-cockroachdb-0 27s - pv1 1Gi RWO Bound default/datadir-cockroachdb-1 26s - pv2 1Gi RWO Bound default/datadir-cockroachdb-2 26s - ~~~ - -3. Wait a bit and then verify that three pods were created successfully. If you do not see three pods, wait longer and check again. - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl get pods - ~~~ - - ~~~ - NAME READY STATUS RESTARTS AGE - cockroachdb-0 1/1 Running 0 2m - cockroachdb-1 1/1 Running 0 2m - cockroachdb-2 1/1 Running 0 2m - ~~~ - -
    - -{{site.data.alerts.callout_success}}The StatefulSet configuration sets all CockroachDB nodes to write to stderr, so if you ever need access to a pod/node's logs to troubleshoot, use kubectl logs <podname> rather than checking the log on the pod itself.{{site.data.alerts.end}} - -## Step 4. Use the built-in SQL client - -1. Start the [built-in SQL client](use-the-built-in-sql-client.html) in a one-off interactive pod, using the `cockroachdb-public` hostname to access the CockroachDB cluster: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl run cockroachdb -it --image=cockroachdb/cockroach --rm --restart=Never \ - -- sql --insecure --host=cockroachdb-public - ~~~ - -2. Run some [CockroachDB SQL statements](sql-statements.html): - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE DATABASE bank; - ~~~ - - {% include copy-clipboard.html %} - ~~~ sql - > CREATE TABLE bank.accounts (id INT PRIMARY KEY, balance DECIMAL); - ~~~ - - {% include copy-clipboard.html %} - ~~~ sql - > INSERT INTO bank.accounts VALUES (1234, 10000.50); - ~~~ - - {% include copy-clipboard.html %} - ~~~ sql - > SELECT * FROM bank.accounts; - ~~~ - - ~~~ - +------+----------+ - | id | balance | - +------+----------+ - | 1234 | 10000.50 | - +------+----------+ - (1 row) - ~~~ - -4. When you're done with the SQL shell, use **CTRL-D**, **CTRL-C**, or `\q` to exit and delete the temporary pod. - -## Step 5. Simulate node failure - -Based on the `replicas: 3` line in the StatefulSet configuration, Kubernetes ensures that three pods/nodes are running at all times. If a pod/node fails, Kubernetes will automatically create another pod/node with the same network identity and persistent storage. - -To see this in action: - -1. Stop one of CockroachDB nodes: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl delete pod cockroachdb-2 - ~~~ - - ~~~ - pod "cockroachdb-2" deleted - ~~~ - -2. Verify that the pod was restarted: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl get pod cockroachdb-2 - ~~~ - ~~~ - NAME READY STATUS RESTARTS AGE - cockroachdb-2 0/1 ContainerCreating 0 3s - ~~~ - -3. Wait a bit and then verify that the pod is ready: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl get pod cockroachdb-2 - ~~~ - - ~~~ - NAME READY STATUS RESTARTS AGE - cockroachdb-2 1/1 Running 0 1m - ~~~ - -## Step 6. Scale the cluster - -
    - -The Kubernetes script created 4 nodes, one master and 3 workers. Pods get placed only on worker nodes, so to ensure that you do not have two pods on the same node (as recommended in our [production best practices](recommended-production-settings.html)), you need to add a new worker node and then edit your StatefulSet configuration to add another pod. - -1. Add a worker node: - - On GCE, resize your [Managed Instance Group](https://cloud.google.com/compute/docs/instance-groups/). - - On AWS, resize your [Auto Scaling Group](https://docs.aws.amazon.com/autoscaling/latest/userguide/as-manual-scaling.html). - -2. Use the `kubectl scale` command to add a pod to your StatefulSet: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl scale statefulset cockroachdb --replicas=4 - ~~~ - - ~~~ - statefulset "cockroachdb" scaled - ~~~ - -3. Verify that a fourth pod was added successfully: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl get pods - ~~~ - - ~~~ - NAME READY STATUS RESTARTS AGE - cockroachdb-0 1/1 Running 0 2h - cockroachdb-1 1/1 Running 0 2h - cockroachdb-2 1/1 Running 0 9m - cockroachdb-3 1/1 Running 0 46s - ~~~ - -
    - -
    - -To increase the number of pods in your cluster, use the `kubectl scale` command to alter the `replicas: 3` configuration for your StatefulSet: - -{% include copy-clipboard.html %} -~~~ shell -$ kubectl scale statefulset cockroachdb --replicas=4 -~~~ - -~~~ -statefulset "cockroachdb" scaled -~~~ - -Verify that a fourth pod was added successfully: - -{% include copy-clipboard.html %} -~~~ shell -$ kubectl get pods -~~~ - -~~~ -NAME READY STATUS RESTARTS AGE -cockroachdb-0 1/1 Running 0 2h -cockroachdb-1 1/1 Running 0 2h -cockroachdb-2 1/1 Running 0 9m -cockroachdb-3 1/1 Running 0 46s -~~~ - -
    - -## Step 7. Stop the cluster - -
    - -To shut down the CockroachDB cluster: - -1. Use the `kubectl delete` command to clean up all of the resources you created, including the logs and remote persistent volumes: - - {% include copy-clipboard.html %} - ~~~ shell - $ kubectl delete pods,statefulsets,services,persistentvolumeclaims,persistentvolumes,poddisruptionbudget \ - -l app=cockroachdb - ~~~ - -2. Run the `cluster/kube-down.sh` script in the `kubernetes` directory to stop Kubernetes. - -{{site.data.alerts.callout_danger}}If you stop Kubernetes without first deleting resources, the remote persistent volumes will still exist in your cloud project.{{site.data.alerts.end}} - -
    - -
    - -- **If you plan to restart the cluster**, use the `minikube stop` command. This shuts down the minikube virtual machine but preserves all the resources you created: - - {% include copy-clipboard.html %} - ~~~ shell - $ minikube stop - ~~~ - - ~~~ - Stopping local Kubernetes cluster... - Machine stopped. - ~~~ - - You can restore the cluster to its previous state with `minikube start`. - -- **If you do not plan to restart the cluster**, use the `minikube delete` command. This shuts down and deletes the minikube virtual machine and all the resources you created, including persistent volumes: - - {% include copy-clipboard.html %} - ~~~ shell - $ minikube delete - ~~~ - - ~~~ - Deleting local Kubernetes cluster... - Machine deleted. - ~~~ - - {{site.data.alerts.callout_success}}To retain logs, copy them from each pod's stderr before deleting the cluster and all its resources. To access a pod's standard error stream, run kubectl logs <podname>.{{site.data.alerts.end}} - -
    - -## See Also - -- [Orchestrate CockroachDB with Docker Swarm](orchestrate-cockroachdb-with-docker-swarm.html) -- [Cloud Deployment](cloud-deployment.html) -- [Manual Deployment](manual-deployment.html) -- [Local Deployment](start-a-local-cluster.html) diff --git a/src/current/v1.0/orchestration.md b/src/current/v1.0/orchestration.md deleted file mode 100644 index 2613882a171..00000000000 --- a/src/current/v1.0/orchestration.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Orchestration -summary: -toc: false -canonical: /stable/kubernetes-overview ---- - -Orchestration systems automate the deployment, scaling, and management of containerized applications. Combined with CockroachDB's [automated sharding](frequently-asked-questions.html#how-does-cockroachdb-scale) and [fault tolerance](frequently-asked-questions.html#how-does-cockroachdb-survive-failures), they have the potential to lower operator overhead to almost nothing. - -Use the following guides to run CockroachDB with popular open-source orchestration systems: - -- [Kubernetes](orchestrate-cockroachdb-with-kubernetes.html) -- [Docker Swarm](orchestrate-cockroachdb-with-docker-swarm.html) - -## See Also - -- [Cloud Deployment](cloud-deployment.html) -- [Manual Deployment](manual-deployment.html) -- [Monitoring](monitor-cockroachdb-with-prometheus.html) -- [Start a Local Cluster](start-a-local-cluster.html) diff --git a/src/current/v1.0/porting-postgres.md b/src/current/v1.0/porting-postgres.md deleted file mode 100644 index c1c78deb395..00000000000 --- a/src/current/v1.0/porting-postgres.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Porting from PostgreSQL -summary: Porting an application from PostgreSQL -toc: true ---- - -Although CockroachDB supports PostgreSQL syntax and drivers, it does not offer exact compatibility. This page documents the known list of differences between PostgreSQL and CockroachDB for identical input. That is, a SQL statement of the type listed here will behave differently than in PostgreSQL. Porting an existing application to CockroachDB will require changing these expressions. - -Note that some of these differences only apply to rare inputs, and so no change will be needed, even if the listed feature is being used. In these cases, it is safe to ignore the porting instructions. - -{{site.data.alerts.callout_info}}This document currently only covers how to rewrite SQL expressions. It does not discuss strategies for porting applications that use SQL features CockroachDB does not currently support, such as arrays or the ENUM type.{{site.data.alerts.end}} - - -### Overflow of `float` - -In PostgreSQL, the `float` type returns an error when it overflows or an expression would return Infinity: - -~~~ -postgres=# select 1e300::float * 1e10::float; -ERROR: value out of range: overflow -postgres=# select pow(0::float, -1::float); -ERROR: zero raised to a negative power is undefined -~~~ - -In CockroachDB, these expressions instead return Infinity: - -~~~ sql -SELECT 1e300::float * 1e10::float; -~~~ - -~~~ -+----------------------------+ -| 1e300::FLOAT * 1e10::FLOAT | -+----------------------------+ -| +Inf | -+----------------------------+ -~~~ - -~~~ sql -SELECT pow(0::float, -1::float); -~~~ - -~~~ -+---------------------------+ -| pow(0::FLOAT, - 1::FLOAT) | -+---------------------------+ -| +Inf | -+---------------------------+ -~~~ - -### Precedence of unary `~` - -In PostgreSQL, the unary `~` (bitwise not) operator has a low precedence. 
For example, the following query is parsed as `~ (1 + 2)` because `~` has a lower precedence than `+`: - -~~~ sql -SELECT ~1 + 2 -~~~ - -In CockroachDB, unary `~` has the same (high) precedence as unary `-`, so the above expression will be parsed as `(~1) + 2`. - -**Porting instructions:** Manually add parentheses around expressions that depend on the PostgreSQL behavior. - -### Precedence of bitwise operators - -In PostgreSQL, the operators `|` (bitwise OR), `#` (bitwise XOR), and `&` (bitwise AND) all have the same precedence. - -In CockroachDB, the precedence from highest to lowest is: `&`, `#`, `|`. - -**Porting instructions:** Manually add parentheses around expressions that depend on the PostgreSQL behavior. - -### Integer division - -In PostgreSQL, division of integers results in an integer. For example, the following query returns `1`, since the `1 / 2` is truncated to `0`: - -~~~ sql -SELECT 1 + 1 / 2 -~~~ - -In CockroachDB, integer division results in a `decimal`. CockroachDB instead provides the `//` operator to perform floor division. - -**Porting instructions:** Change `/` to `//` in integer division where the result must be an integer. - -### Shift argument modulo - -In PostgreSQL, the shift operators (`<<`, `>>`) sometimes modulo their second argument to the bit size of the underlying type. For example, the following query results in a `1` because the int type is 32 bits, and `32 % 32` is `0`, so this is the equivalent of `1 << 0`: - -~~~ sql -SELECT 1::int << 32 -~~~ - -In CockroachDB, no such modulo is performed. - -**Porting instructions:** Manually add a modulo to the second argument. Also note that CockroachDB's [`INT`](int.html) type is always 64 bits. For example: - -~~~ sql -SELECT 1::int << (x % 64) -~~~ diff --git a/src/current/v1.0/primary-key.md b/src/current/v1.0/primary-key.md deleted file mode 100644 index 4f3ea04bcc6..00000000000 --- a/src/current/v1.0/primary-key.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Primary Key Constraint -summary: The Primary Key constraint specifies that the columns can be used to uniquely identify rows in a table. -toc: true ---- - -The Primary Key [constraint](constraints.html) specifies that the constrained columns' values must uniquely identify each row. - -Unlike other constraints which have very specific uses, the Primary Key constraint *should be used for every table* because it provides an intrinsic structure to the table's data. This both makes it easier to understand, as well as improving query performance. - -{{site.data.alerts.callout_info}}A table's primary key can only be specified in the CREATE TABLE statement. It cannot be changed later using ALTER TABLE, though it is possible to go through a process to create a new table with the new primary key you want and then migrate the data.{{site.data.alerts.end}} - - -## Details - -- Tables can only have one primary key. -- To ensure each row has a unique identifier, the Primary Key constraint combines the properties of both the [Unique](unique.html) and [Not Null](not-null.html) constraints. The properties of both constraints are necessary to make sure each row's primary key columns contain distinct sets of values. - - - The properties of the Unique constraint ensure that each value is distinct from all other values. - - - However, because *NULL* values never equal other *NULL* values, the Unique constraint is not enough (two rows can appear the same if one of the values is *NULL*). 
To prevent the appearance of duplicated values, the Primary Key constraint also enforces the properties of the Not Null constraint. - -- The columns in the Primary Key constraint are used to create its `primary` [index](indexes.html), which CockroachDB uses by default to access the table's data. - - This index does not take up additional disk space (unlike secondary indexes, which do) because CockroachDB uses the `primary` index to structure the table's data in the key-value layer. For more information, see our blog post [SQL in CockroachDB: Mapping Table Data to Key-Value Storage](https://www.cockroachlabs.com/blog/sql-in-cockroachdb-mapping-table-data-to-key-value-storage/). - -- For optimal performance, we recommend defining a primary key for *every* table. - - If you create a table without defining a primary key, CockroachDB uses a unique identifier for each row, which it then uses for the `primary` index. Because you cannot meaningfully use this unique row identifier column to filter table data, it does not offer any performance optimization. This means you will always have improved performance by defining a primary key for a table. For more information, see our blog post [Index Selection in CockroachDB](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/). - -## Syntax - -Primary Key constraints can be defined at the [table level](#table-level). However, if you only want the constraint to apply to a single column, it can be applied at the [column level](#column-level). - -### Column Level - -{% include {{ page.version.version }}/sql/diagrams/primary_key_column_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_name` | The name of the Primary Key column. | -| `column_type` | The Primary Key column's [data type](data-types.html). | -| `column_constraints` | Any other column-level [constraints](constraints.html) you want to apply to this column. | -| `column_def` | Definitions for any other columns in the table. | -| `table_constraints` | Any table-level [constraints](constraints.html) you want to apply. | - -**Example** - -~~~ sql -> CREATE TABLE orders ( - order_id INT PRIMARY KEY, - order_date TIMESTAMP NOT NULL, - order_mode STRING(8), - customer_id INT, - order_status INT - ); -~~~ - -### Table Level - -{% include {{ page.version.version }}/sql/diagrams/primary_key_table_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_def` | Definitions for any other columns in the table. | -| `name` | The name you want to use for the constraint, which must be unique to its table and follow these [identifier rules](keywords-and-identifiers.html#identifiers). | -| `column_name` | The name of the column you want to use as the Primary Key.

    The order in which you list columns here affects the structure of the `primary` index.| -| `table_constraints` | Any other table-level [constraints](constraints.html) you want to apply. | - -**Example** - -~~~ sql -> CREATE TABLE IF NOT EXISTS inventories ( - product_id INT, - warehouse_id INT, - quantity_on_hand INT NOT NULL, - PRIMARY KEY (product_id, warehouse_id) - ); -~~~ - -## Usage Example - -~~~ sql -> CREATE TABLE IF NOT EXISTS inventories ( - product_id INT, - warehouse_id INT, - quantity_on_hand INT NOT NULL, - PRIMARY KEY (product_id, warehouse_id) - ); - -> INSERT INTO inventories VALUES (1, 1, 100); - -> INSERT INTO inventories VALUES (1, 1, 200); -~~~ -~~~ -pq: duplicate key value (product_id,warehouse_id)=(1,1) violates unique constraint "primary" -~~~ -~~~ sql -> INSERT INTO inventories VALUES (1, NULL, 100); -~~~ -~~~ -pq: null value in column "warehouse_id" violates not-null constraint -~~~ - -## See Also - -- [Constraints](constraints.html) -- [Check constraint](check.html) -- [Default Value constraint](default-value.html) -- [Foreign Key constraint](foreign-key.html) -- [Not Null constraint](not-null.html) -- [Unique constraint](unique.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) - diff --git a/src/current/v1.0/privileges.md b/src/current/v1.0/privileges.md deleted file mode 100644 index 555e85d75f0..00000000000 --- a/src/current/v1.0/privileges.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Privileges -summary: Privileges are granted to users at the database and table levels. They are not yet supported for other granularities such as columns or rows. -toc: true ---- - -In CockroachDB, privileges are granted to [users](create-and-manage-users.html) at the database and table levels. They are not yet supported for other granularities such as columns or rows. - -When a user connects to a database, either via the [built-in SQL client](use-the-built-in-sql-client.html) or a [client driver](install-client-drivers.html), CockroachDB checks the user's privileges for each statement executed. If the user does not have sufficient privileges for a statement, CockroachDB gives an error. - -For the privileges required by specific statements, see the documentation for the respective [SQL statement](sql-statements.html). - - -## Supported Privileges - -For a full list of supported privileges, see the [`GRANT`](grant.html) documentation. 
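As noted above, a statement issued by a user who lacks a required privilege fails with an error. The following is an illustrative sketch only; the user, table, and exact error text are hypothetical and may differ slightly in your cluster:

~~~ sql
> SELECT * FROM bank.accounts;
~~~
~~~
pq: user maxroach does not have SELECT privilege on table accounts
~~~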
- -## Granting Privileges - -To grant privileges to a user, use the [`GRANT`](grant.html) statement, for example: - -~~~ sql -> GRANT SELECT, INSERT ON bank.accounts TO maxroach; -~~~ - -## Showing Privileges - -To show privileges granted to users, use the [`SHOW GRANTS`](show-grants.html) statement, for example: - -~~~ sql -> SHOW GRANTS ON DATABASE bank FOR maxroach; -~~~ - -## Revoking Privileges - -To revoke privileges from users, use the [`REVOKE`](revoke.html) statement, for example: - -~~~ sql -> REVOKE INSERT ON bank.accounts FROM maxroach; -~~~ - -## See Also - -- [Create & Manage Users](create-and-manage-users.html) -- [SQL Statements](sql-statements.html) \ No newline at end of file diff --git a/src/current/v1.0/query-behavior-troubleshooting.md b/src/current/v1.0/query-behavior-troubleshooting.md deleted file mode 100644 index b9e3c196166..00000000000 --- a/src/current/v1.0/query-behavior-troubleshooting.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Query Behavior Troubleshooting -summary: Learn how to troubleshoot issues with specific queries with CockroachDB -toc: true ---- - -If a query returns an unexpected result or takes longer than expected to process, this page will help you troubleshoot the issue. - - -## Correctness Issues - -If your queries return unexpected results, there are several possibilities: - -- You’ve encountered a [known limitation](https://github.com/cockroachdb/cockroach/issues?q=is%3Aopen+is%3Aissue+label%3Aknown-limitation), [UX surprise](https://github.com/cockroachdb/cockroach/issues?utf8=%E2%9C%93&q=is%3Aopen%20is%3Aissue%20label%3Aux-surprise) or other problem with [SQL semantics](https://github.com/cockroachdb/cockroach/issues?utf8=%E2%9C%93&q=is%3Aopen%20is%3Aissue%20label%3Asql-semantics). Feel free to leave a comment on the existing issue indicating that you’ve encountered a problem as well. -- Your application has a bug. It's always worthwhile to check and double-check your application’s logic before filing an issue. That said, you can always [reach out for support](support-resources.html). - - CockroachDB has a bug. Please [file an issue](file-an-issue.html). - -## Performance Issues - -If your queries take much longer than expected, there are a few things you can check into: - -- Review your deployment's monitoring. General network latency or partitioning events can affect query response times. - -- [Log and debug long-running queries/transactions](debug-and-error-logs.html#log-queries). - -If you're still unable to determine why the query executes slowly, please [file an issue](file-an-issue.html). - -## `bad connection` & `closed` Responses - -If you receive a response of `bad connection` or `closed`, this normally indicates that the node you connected to died. You can check this by connecting to another node in the cluster and running [`cockroach node status`](view-node-details.html#show-the-status-of-all-nodes). - -Once you find the downed node, you can check its [logs](debug-and-error-logs.html) (stored in `cockroach-data/logs` by default). - -Because this kind of behavior is entirely unexpected, you should [file an issue](file-an-issue.html). - -## Something Else? 
- -If we do not have a solution here, you can try using our other [support resources](support-resources.html), including: - -- [CockroachDB Community Forum](https://forum.cockroachlabs.com) -- [CockroachDB Community Slack](https://cockroachdb.slack.com) -- [StackOverflow](http://stackoverflow.com/questions/tagged/cockroachdb) -- [CockroachDB Support Portal](https://support.cockroachlabs.com) diff --git a/src/current/v1.0/recommended-production-settings.md b/src/current/v1.0/recommended-production-settings.md deleted file mode 100644 index cbf6f6b4eef..00000000000 --- a/src/current/v1.0/recommended-production-settings.md +++ /dev/null @@ -1,297 +0,0 @@ ---- -title: Recommended Production Settings -summary: Recommended settings for production deployments of CockroachDB. -toc: true -toc_not_nested: true ---- - -This page provides recommended settings for production deployments. - - -## Hardware - -Minimum recommendations: - -- For a replicated cluster, use at least 3 nodes to ensure availability if a single node fails (see [Cluster Topology](#cluster-topology) for more details). -- Each node should have sufficient CPU, RAM, network, and storage capacity to handle your workload, but the bare minimum is 1 CPU and 2 GB of RAM per node. More data, complex workloads, higher concurrency, and faster performance require additional resources. - -For best performance: - -- Use SSDs over HDDs. -- Use larger/more powerful nodes. Adding more CPU is usually more beneficial than adding more RAM. - -For best resiliency: - -- Use many smaller nodes instead of fewer larger ones. Recovery from a failed node is faster when data is spread across more nodes. -- Use [zone configs](configure-replication-zones.html) to increase the replication factor from 3 (the default) to 5. This is especially recommended if you are using local disks rather than a cloud providers' network-attached disks that are often replicated underneath the covers, because local disks have a greater risk of failure. You can do this for the [entire cluster](configure-replication-zones.html#edit-the-default-replication-zone) or for specific [databases](configure-replication-zones.html#create-a-replication-zone-for-a-database) or [tables](configure-replication-zones.html#create-a-replication-zone-for-a-table). - -## Cluster Topology - -When running a cluster with more than one node, each replica will be on a different node and a majority of replicas must remain available for the cluster to make progress. Therefore: - -- Use at least three nodes to ensure that a majority of replicas (2/3) remains available if a node fails. - -- Run each node on a separate machine. Since CockroachDB replicates across nodes, running more than one node per machine increases the risk of data loss if a machine fails. Likewise, if a machine has multiple disks or SSDs, run one node with multiple `--store` flags and not one node per disk. For more details about stores, see [Start a Node](start-a-node.html). - -- Configurations with odd numbers of replicas are more robust than those with even numbers. Configurations with three replicas and configurations with four replicas can each tolerate one node failure and still reach a majority (2/3 and 3/4 respectively), so the fourth replica doesn't add any extra fault-tolerance. To survive two simultaneous failures, you must have five replicas. 
- -- When replicating across datacenters, it's recommended to specify which datacenter each node is in using the `--locality` flag to ensure even replication (see this [example](configure-replication-zones.html#even-replication-across-datacenters) for more details). If some of your datacenters are much farther apart than others, [specifying multiple levels of locality (such as country and region) is recommended](configure-replication-zones.html#descriptive-attributes-assigned-to-nodes). - -- When replicating across datacenters, be aware that the round-trip latency between datacenters will have a direct effect on your database's performance, with cross-continent clusters performing noticeably worse than clusters in which all nodes are geographically close together. - -For details about controlling the number and location of replicas, see [Configure Replication Zones](configure-replication-zones.html). - -## Clock Synchronization - -CockroachDB needs moderately accurate time to preserve data consistency, so it's important to run [NTP](http://www.ntp.org/) or other clock synchronization software on each node. - -By default, CockroachDB's maximum allowed clock offset is 500ms. When a node detects that its clock offset, relative to other nodes, is half or more of the maximum allowed, it spontaneously shuts down. While [serializable consistency](https://en.wikipedia.org/wiki/Serializability) is maintained regardless of clock skew, skew outside the configured clock offset bounds can result in violations of single-key linearizability between causally dependent transactions. With NTP or other clock synchronization software running on each node, there's very little risk of ever exceeding the maximum offset and encountering such anomalies, and even on well-functioning hardware not running synchronization software, slow clock drift is most common, which CockroachDB handles safely. - -The one rare case to note is when a node's clock suddenly jumps beyond the maximum offset before the node detects it. Although extremely unlikely, this could occur, for example, when running CockroachDB inside a VM and the VM hypervisor decides to migrate the VM to different hardware with a different time. In this case, there can be a small window of time between when the node's clock becomes unsynchronized and when the node spontaneously shuts down. During this window, it would be possible for a client to read stale data and write data derived from stale reads. - -## Cache Size - -If you run multiple applications on the same machine as a CockroachDB node, you might consider manually setting the cache size instead of using the default 25% of available memory. - -To manually set the limit of the cache size, start the node using the [`--cache` flag](start-a-node.html#flags). For example, the following command limits a node's cache to 5GB: - -```shell -$ cockroach start --cache=5GB -``` - -## File Descriptors Limit - -CockroachDB can use a large number of open file descriptors, often more than is available by default. Therefore, please note the following recommendations. - -For each CockroachDB node: - -- At a **minimum**, the file descriptors limit must be 1956 (1700 per store plus 256 for networking). If the limit is below this threshold, the node will not start. -- It is **recommended** to set the file descriptors limit to unlimited; otherwise, the recommended limit is at least 15000 (10000 per store plus 5000 for networking). This higher limit ensures performance and accommodates cluster growth. 
-- When the file descriptors limit is not high enough to allocate the recommended amounts, CockroachDB allocates 10000 per store and the rest for networking; if this would result in networking getting less than 256, CockroachDB instead allocates 256 for networking and evenly splits the rest across stores. - -### Increase the File Descriptors Limit - - - -
    - -- [Yosemite and later](#yosemite-and-later) -- [Older versions](#older-versions) - -#### Yosemite and later - -To adjust the file descriptors limit for a single process in Mac OS X Yosemite and later, you must create a property list configuration file with the hard limit set to the recommendation mentioned [above](#file-descriptors-limit). Note that CockroachDB always uses the hard limit, so it's not technically necessary to adjust the soft limit, although we do so in the steps below. - -For example, for a node with 3 stores, we would set the hard limit to at least 35000 (10000 per store and 5000 for networking) as follows: - -1. Check the current limits: - - ~~~ shell - $ launchctl limit maxfiles - maxfiles 10240 10240 - ~~~ - - The last two columns are the soft and hard limits, respectively. If `unlimited` is listed as the hard limit, note that the hidden default limit for a single process is actually 10240. - -2. Create `/Library/LaunchDaemons/limit.maxfiles.plist` and add the following contents, with the final strings in the `ProgramArguments` array set to 35000: - - ~~~ xml - - - - - Label - limit.maxfiles - ProgramArguments - - launchctl - limit - maxfiles - 35000 - 35000 - - RunAtLoad - - ServiceIPC - - - - ~~~ - - Make sure the plist file is owned by `root:wheel` and has permissions `-rw-r--r--`. These permissions should be in place by default. - -3. Restart the system for the new limits to take effect. - -4. Check the current limits: - - ~~~ shell - $ launchctl limit maxfiles - maxfiles 35000 35000 - ~~~ - -#### Older versions - -To adjust the file descriptors limit for a single process in OS X versions earlier than Yosemite, edit `/etc/launchd.conf` and increase the hard limit to the recommendation mentioned [above](#file-descriptors-limit). Note that CockroachDB always uses the hard limit, so it's not technically necessary to adjust the soft limit, although we do so in the steps below. - -For example, for a node with 3 stores, we would set the hard limit to at least 35000 (10000 per store and 5000 for networking) as follows: - -1. Check the current limits: - - ~~~ shell - $ launchctl limit maxfiles - maxfiles 10240 10240 - ~~~ - - The last two columns are the soft and hard limits, respectively. If `unlimited` is listed as the hard limit, note that the hidden default limit for a single process is actually 10240. - -2. Edit (or create) `/etc/launchd.conf` and add a line that looks like the following, with the last value set to the new hard limit: - - ~~~ - limit maxfiles 35000 35000 - ~~~ - -3. Save the file, and restart the system for the new limits to take effect. - -4. Verify the new limits: - - ~~~ shell - $ launchctl limit maxfiles - maxfiles 35000 35000 - ~~~ - -
    - -- [Standard](#standard) -- [With Systemd](#with-systemd) - -#### Standard - -To adjust the file descriptors limit for a single process on Linux, enable PAM user limits and set the hard limit to the recommendation mentioned [above](#file-descriptors-limit). Note that CockroachDB always uses the hard limit, so it's not technically necessary to adjust the soft limit, although we do so in the steps below. - -For example, for a node with 3 stores, we would set the hard limit to at least 35000 (10000 per store and 5000 for networking) as follows: - -1. Make sure the following line is present in both `/etc/pam.d/common-session` and `/etc/pam.d/common-session-noninteractive`: - - ~~~ shell - session required pam_limits.so - ~~~ - -2. Edit `/etc/security/limits.conf` and append the following lines to the file: - - ~~~ shell - * soft nofile 35000 - * hard nofile 35000 - ~~~ - - Note that `*` can be replaced with the username that will be running the CockroachDB server. - -4. Save and close the file. - -5. Restart the system for the new limits to take effect. - -6. Verify the new limits: - - ~~~ shell - $ ulimit -a - ~~~ - -#### With Systemd - -Alternately, if you're using [Systemd](https://en.wikipedia.org/wiki/Systemd): - -1. Edit the service definition to configure the maximum number of open files: - - ~~~ ini - [Service] - ... - LimitNOFILE=35000 - ~~~ - -2. Reload Systemd for the new limit to take effect: - - ~~~ shell - $ systemctl daemon-reload - ~~~ - -
    - -CockroachDB does not yet provide a Windows binary. Once that's available, we will also provide documentation on adjusting the file descriptors limit on Windows. - -
    - -#### Attributions - -This section, "File Descriptors Limit", is a derivative of the chapter *Open File Limits* From the Riak LV 2.1.4 documentation, used under Creative Commons Attribution 3.0 Unported License. diff --git a/src/current/v1.0/release-savepoint.md b/src/current/v1.0/release-savepoint.md deleted file mode 100644 index 5ceb8d388d4..00000000000 --- a/src/current/v1.0/release-savepoint.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: RELEASE SAVEPOINT cockroach_restart -summary: Commit a transaction's changes once there are no retryable errors with the RELEASE SAVEPOINT cockroach_restart statement in CockroachDB. -toc: true ---- - -When using [client-side transaction retries](transactions.html#client-side-transaction-retries), the `RELEASE SAVEPOINT cockroach_restart` statement commits the transaction. - -If statements in the transaction [generated any non-retryable errors](transactions.html#error-handling), `RELEASE SAVEPOINT cockroach_restart` is equivalent to [`ROLLBACK`](rollback-transaction.html), which aborts the transaction and discards *all* updates made by its statements. - -Despite committing the transaction, you must still issue a [`COMMIT`](commit-transaction.html) statement to prepare the connection for the next transaction. - -{{site.data.alerts.callout_danger}}CockroachDB’s SAVEPOINT implementation only supports the cockroach_restart savepoint and does not support all savepoint functionality, such as nested transactions.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/release_savepoint.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to release a savepoint. However, privileges are required for each statement within a transaction. - -## Examples - -### Commit a Transaction - -After declaring `SAVEPOINT cockroach_restart`, commit the transaction with `RELEASE SAVEPOINT cockroach_restart` and then prepare the connection for the next transaction with `COMMIT`. - -~~~ sql -> BEGIN; - -> SAVEPOINT cockroach_restart; - -> UPDATE products SET inventory = 0 WHERE sku = '8675309'; - -> INSERT INTO orders (customer, sku, status) VALUES (1001, '8675309', 'new'); - -> RELEASE SAVEPOINT cockroach_restart; - -> COMMIT; -~~~ - -{{site.data.alerts.callout_danger}}This example assumes you're using client-side intervention to handle transaction retries.{{site.data.alerts.end}} - -## See Also - -- [Transactions](transactions.html) -- [`SAVEPOINT`](savepoint.html) -- [`ROLLBACK`](rollback-transaction.html) -- [`BEGIN`](begin-transaction.html) -- [`COMMIT`](commit-transaction.html) diff --git a/src/current/v1.0/rename-column.md b/src/current/v1.0/rename-column.md deleted file mode 100644 index 983753c7cfa..00000000000 --- a/src/current/v1.0/rename-column.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: RENAME COLUMN -summary: The RENAME COLUMN statement changes the name of a column in a table. -toc: true ---- - -The `RENAME COLUMN` [statement](sql-statements.html) changes the name of a column in a table. - -{{site.data.alerts.callout_info}}It is not possible to rename a column referenced by a view. For more details, see View Dependencies.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/rename_column.html %} - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the table. 
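To confirm that a user holds this privilege before attempting a rename, you can inspect the table's grants with [`SHOW GRANTS`](show-grants.html). The table and user names below are illustrative, and the output is only a sketch of what such a check might return:

~~~ sql
> SHOW GRANTS ON TABLE users FOR maxroach;
~~~
~~~
+-------+----------+------------+
| Table | User     | Privileges |
+-------+----------+------------+
| users | maxroach | CREATE     |
+-------+----------+------------+
(1 row)
~~~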
- -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `IF EXISTS` | Rename the column only if a column of `current_name` exists; if one does not exist, do not return an error. | -| `table_name` | The name of the table with the column you want to use. | -| `current_name` | The current name of the column. | -| `name` | The [`name`](sql-grammar.html#name) you want to use for the column, which must be unique to its table and follow these [identifier rules](keywords-and-identifiers.html#identifiers). | - -## Example - -### Rename a Column - -~~~ sql -> SELECT * FROM users; -~~~ -~~~ -+----+-------+-------+ -| id | name | title | -+----+-------+-------+ -| 1 | Tom | cat | -| 2 | Jerry | rat | -+----+-------+-------+ -~~~ -~~~ sql -> ALTER TABLE users RENAME COLUMN title TO species; -~~~ -~~~ sql -> SELECT * FROM users; -~~~ -~~~ -+----+-------+---------+ -| id | name | species | -+----+-------+---------+ -| 1 | Tom | cat | -| 2 | Jerry | rat | -+----+-------+---------+ -~~~ - -## See Also - -- [`RENAME DATABASE`](rename-database.html) -- [`RENAME TABLE`](rename-table.html) -- [`ALTER TABLE`](alter-table.html) diff --git a/src/current/v1.0/rename-database.md b/src/current/v1.0/rename-database.md deleted file mode 100644 index 5b727274e0f..00000000000 --- a/src/current/v1.0/rename-database.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: RENAME DATABASE -summary: The RENAME DATABASE statement changes the name of a database. -toc: true ---- - -The `RENAME DATABASE` [statement](sql-statements.html) changes the name of a database. - -{{site.data.alerts.callout_info}}It is not possible to rename a database referenced by a view. For more details, see View Dependencies.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/rename_database.html %} - -## Required Privileges - -Only the `root` user can rename databases. - -## Parameters - -Parameter | Description -----------|------------ -`name` | The first instance of `name` is the current name of the database. The second instance is the new name for the database. The new name [must be unique](#rename-fails-new-name-already-in-use) and follow these [identifier rules](keywords-and-identifiers.html#identifiers). - -## Examples - -### Rename a Database - -~~~ sql -> SHOW DATABASES; -~~~ -~~~ -+----------+ -| Database | -+----------+ -| db1 | -| db2 | -| system | -+----------+ -~~~ -~~~ sql -> ALTER DATABASE db1 RENAME TO db3; -~~~ -~~~ -RENAME DATABASE -~~~ -~~~ sql -> SHOW DATABASES; -~~~ -~~~ -+----------+ -| Database | -+----------+ -| db2 | -| db3 | -| system | -+----------+ -~~~ - -### Rename Fails (New Name Already In Use) - -~~~ sql -> SHOW DATABASES; -~~~ -~~~ -+----------+ -| Database | -+----------+ -| db2 | -| db3 | -| system | -+----------+ -~~~ -~~~ sql -> ALTER DATABASE db2 RENAME TO db3; -~~~ -~~~ -pq: the new database name "db3" already exists -~~~ - -## See Also - -- [`CREATE DATABASE`](create-database.html) -- [`SHOW DATABASES`](show-databases.html) -- [`SET DATABASE`](set-vars.html) -- [`DROP DATABASE`](drop-database.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/rename-index.md b/src/current/v1.0/rename-index.md deleted file mode 100644 index 87180add8a9..00000000000 --- a/src/current/v1.0/rename-index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: RENAME INDEX -summary: The RENAME INDEX statement changes the name of an index for a table. 
-toc: true ---- - -The `RENAME INDEX` [statement](sql-statements.html) changes the name of an index for a table. - -{{site.data.alerts.callout_info}}It is not possible to rename an index referenced by a view. For more details, see View Dependencies.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/rename_index.html %} - -## Required Privileges - -The user must have the `CREATE` [privilege](privileges.html) on the table. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `IF EXISTS` | Rename the index only if an index `current_name` exists; if one does not exist, do not return an error. | -| `table_name` | The name of the table with the index you want to use | -| `index_name` | The current name of the index | -| `name` | The [`name`](sql-grammar.html#name) you want to use for the index, which must be unique to its table and follow these [identifier rules](keywords-and-identifiers.html#identifiers). | - -## Example - -### Rename an Index - -~~~ sql -> SHOW INDEXES FROM users; -~~~ -~~~ -+-------+----------------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+-------+----------------+--------+-----+--------+-----------+---------+----------+ -| users | primary | true | 1 | id | ASC | false | false | -| users | users_name_idx | false | 1 | name | ASC | false | false | -| users | users_name_idx | false | 2 | id | ASC | false | true | -+-------+----------------+--------+-----+--------+-----------+---------+----------+ -(3 rows) -~~~ -~~~ sql -> ALTER INDEX users@users_name_idx RENAME TO name_idx; -~~~ -~~~ -RENAME INDEX -~~~ -~~~ sql -> SHOW INDEXES FROM users; -~~~ -~~~ -+-------+----------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+-------+----------+--------+-----+--------+-----------+---------+----------+ -| users | primary | true | 1 | id | ASC | false | false | -| users | name_idx | false | 1 | name | ASC | false | false | -| users | name_idx | false | 2 | id | ASC | false | true | -+-------+----------+--------+-----+--------+-----------+---------+----------+ -(3 rows) -~~~ - -## See Also - -- [Indexes](indexes.html) -- [`CREATE INDEX`](create-index.html) -- [`RENAME COLUMN`](rename-column.html) -- [`RENAME DATABASE`](rename-database.html) -- [`RENAME TABLE`](rename-table.html) diff --git a/src/current/v1.0/rename-table.md b/src/current/v1.0/rename-table.md deleted file mode 100644 index b82c94b7cee..00000000000 --- a/src/current/v1.0/rename-table.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: RENAME TABLE -summary: The RENAME TABLE statement changes the name of a table. -toc: true ---- - -The `RENAME TABLE` [statement](sql-statements.html) changes the name of a table. It can also be used to move a table from one database to another. - -{{site.data.alerts.callout_info}}It is not possible to rename a table referenced by a view. For more details, see View Dependencies.{{site.data.alerts.end}} - - -## Required Privileges - -The user must have the `DROP` [privilege](privileges.html) on the table and the `CREATE` on the parent database. When moving a table from one database to another, the user must have the `CREATE` privilege on both the source and target databases. 
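For example, a hypothetical user `maxroach` could be granted the privileges needed to move a table named `tablea` from `db1` to `db2` as follows (all names here are illustrative):

~~~ sql
> GRANT DROP ON TABLE db1.tablea TO maxroach;

> GRANT CREATE ON DATABASE db1, db2 TO maxroach;
~~~

See [`GRANT`](grant.html) for the full list of grantable privileges.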
- -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/rename_table.html %} - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `IF EXISTS` | Rename the table only if a table with the current name exists; if one does not exist, do not return an error. | -| `current_name` | The current name of the table. | -| `new_name` | The new name of the table, which must be unique within its database and follow these [identifier rules](keywords-and-identifiers.html#identifiers). When the parent database is not set as the default, the name must be formatted as `database.name`.

    The [`UPSERT`](upsert.html) and [`INSERT ON CONFLICT`](insert.html) statements use a temporary table called `excluded` to handle uniqueness conflicts during execution. It's therefore not recommended to use the name `excluded` for any of your tables. | - - -## Examples - -### Rename a table - -~~~ sql -> SHOW TABLES FROM db1; -~~~ -~~~ -+--------+ -| Table | -+--------+ -| table1 | -| table2 | -+--------+ -~~~ -~~~ sql -> ALTER TABLE db1.table1 RENAME TO db1.tablea -~~~ -~~~ sql -> SHOW TABLES FROM db1; -~~~ -~~~ -+--------+ -| Table | -+--------+ -| table2 | -| tablea | -+--------+ -~~~ - -To avoid an error in case the table does not exist, you can include `IF EXISTS`: - -~~~ sql -> ALTER TABLE IF EXISTS db1.table1 RENAME TO db1.table2; -~~~ - -### Move a table - -To move a table from one database to another, use the above syntax but specify the source database after `ALTER TABLE` and the target database after `RENAME TO`: - -~~~ sql -> SHOW DATABASES; -~~~ -~~~ -+----------+ -| Database | -+----------+ -| db1 | -| db2 | -| system | -+----------+ -~~~ -~~~ sql -> SHOW TABLES FROM db1; -~~~ -~~~ -+--------+ -| Table | -+--------+ -| table2 | -| tablea | -+--------+ -~~~ -~~~ sql -> SHOW TABLES FROM db2; -~~~ -~~~ -+-------+ -| Table | -+-------+ -+-------+ -~~~ -~~~ sql -> ALTER TABLE db1.tablea RENAME TO db2.tablea -~~~ -~~~ sql -> SHOW TABLES FROM db1; -~~~ -~~~ -+--------+ -| Table | -+--------+ -| table2 | -+--------+ -~~~ -~~~ sql -> SHOW TABLES FROM db2; -~~~ -~~~ -+--------+ -| Table | -+--------+ -| tablea | -+--------+ -~~~ - -## See Also - -- [`CREATE TABLE`](create-table.html) -- [`ALTER TABLE`](alter-table.html) -- [`SHOW TABLES`](show-tables.html) -- [`DROP TABLE`](drop-table.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/restore-data.md b/src/current/v1.0/restore-data.md deleted file mode 100644 index 9b96ad9dbed..00000000000 --- a/src/current/v1.0/restore-data.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Restore Data -summary: Learn how to back up and restore a CockroachDB cluster. -toc: false ---- - -How you restore your cluster's data depends on the type of [backup](back-up-data.html) originally: - -Backup Type | Restore using... -------------|----------------- -[`cockroach dump`](sql-dump.html) | [Import data](import-data.html) -[`BACKUP`](backup.html)
    (*[enterprise license](https://www.cockroachlabs.com/pricing/) only*) | [`RESTORE`](restore.html) - -## See Also - -- [Back up Data](back-up-data.html) -- [Use the Built-in SQL Client](use-the-built-in-sql-client.html) -- [Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/restore.md b/src/current/v1.0/restore.md deleted file mode 100644 index 8c8ce6bccb3..00000000000 --- a/src/current/v1.0/restore.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: RESTORE -summary: Restore your CockroachDB cluster from cloud storage services such as AWS S3, Google Cloud Storage, or NFS. -toc: true ---- - -{{site.data.alerts.callout_danger}}The RESTORE feature is available only to enterprise license users. For non-enterprise restores, see Restore Data.{{site.data.alerts.end}} - -The `RESTORE` [statement](sql-statements.html) restores your cluster's schemas and data from [an enterprise license-generated backup](backup.html) stored on services such as AWS S3, Google Cloud Storage, NFS, or HTTP storage. - -Because CockroachDB is designed with high fault tolerance, restores are designed primarily for disaster recovery, i.e., restarting your cluster if it loses a majority of its nodes. Isolated issues (such as small-scale node outages) do not require any intervention. - - -## Functional Details - -### Restore Targets - -You can restore entire tables (which automatically includes their indexes) or [views](views.html) from a backup. This process uses the data stored in the backup to create entirely new tables or views in the [target database](#target-database). - -The notion of "restoring a database" simply restores all of the tables and views that belong to the database, but does not create the database. For more information, see [Target Database](#target-database). - -{{site.data.alerts.callout_info}}RESTORE only offers table-level granularity; it does not support restoring subsets of a table.{{site.data.alerts.end}} - -Because this process is designed for disaster recovery, CockroachDB expects that the tables do not currently exist in the [target database](#target-database). This means the target database must not have tables or views with the same name as the restored table or view. If any of the restore target's names are being used, you can: - -- [`DROP TABLE`](drop-table.html) or [`DROP VIEW`](drop-view.html) and then restore them. -- [Restore the table or view into a different database](#into_db). - -### Object Dependencies - -Dependent objects must be restored at the same time as the objects they depend on. - -Object | Depends On --------|----------- -Table with [foreign key](foreign-key.html) constraints | The table it `REFERENCES` (however, this dependency can be [removed during the restore](#skip_missing_foreign_keys)) -[Views](views.html) | The tables used in the view's `SELECT` statement -[Interleaved tables](interleave-in-parent.html) | The parent table in the [interleaved hierarchy](interleave-in-parent.html#interleaved-hierarchy) - -### Target Database - -By default, tables and views are restored into a database with the name of the database from which they were backed up. However, also consider: - -- You can choose to [change the target database](#into_db). -- If it no longer exists, you must [create the target database](create-database.html). - -The target database must not have tables or views with the same name as the tables or views you're restoring. - -### Users and Privileges - -Table and view users/privileges are not restored.
Restored tables and views instead inherit the privileges of the database into which they're restored. - -However, every backup includes `system.users`, so you can [restore users and their passwords](#restoring-users-from-system-users-backup). - -Table-level privileges must be [granted to users](grant.html) after the restore is complete. - -### Restore Types - -You can either restore from a full backup or from a full backup with incremental backups, based on the backup files you include. - -Restore Type | Parameters -----|---------- -**Full backup** | Include only the path to the full backup. -**Full backup +
    incremental backups** | Include the path to the full backup as the first argument and the subsequent incremental backups from oldest to newest as the following arguments. - -## Performance - -The `RESTORE` process minimizes its impact on the cluster's performance by distributing work to all nodes. Subsets of the restored data (known as ranges) are evenly distributed among randomly selected nodes, with each range initially restored to only one node. Once the range is restored, the node begins replicating it to other nodes. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/restore.html %} - -## Required Privileges - -Only the `root` user can run `RESTORE`. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `table_pattern` | The table or [view](views.html) you want to restore. | -| `full_backup_location` | The URL where the full backup is stored.

    For information about this URL structure, see [Backup File URLs](#backup-file-urls). | -| `incremental_backup_location` | The URL where an incremental backup is stored.

    Lists of incremental backups must be sorted from oldest to newest. The newest incremental backup's timestamp must be within the table's garbage collection period.

    For information about this URL structure, see [Backup File URLs](#backup-file-urls).

    For more information about garbage collection, see [Configure Replication Zones](configure-replication-zones.html#replication-zone-format). | -| `kv_option_list` | Control your backup's behavior with [these options](#restore-option-list). | - -### Backup File URLs - -The URL for your backup's locations must use the following format: - -{% include {{ page.version.version }}/misc/external-urls.md %} - -### Restore Option List - -You can include the following options as key-value pairs in the `kv_option_list` to control the restore process's behavior. - -#### `into_db` - -- **Description**: If you want to restore a table or view into a database other than the one it originally existed in, you can [change the target database](#restore-into-a-different-database). This is useful if you want to restore a table that currently exists, but do not want to drop it. -- **Key**: `into_db` -- **Value**: The name of the database you want to use -- **Example**: `WITH OPTIONS ('into_db' = 'newdb')` - -#### `skip_missing_foreign_keys` - -- **Description**: If you want to restore a table with a foreign key but do not want to restore the table it references, you can [drop the Foreign Key constraint from the table](#skip_missing_foreign_keys) and then have it restored. -- **Key**: `skip_missing_foreign_keys` -- **Value**: *No value* -- **Example**: `WITH OPTIONS ('skip_missing_foreign_keys')` - -## Examples - -### Restore a Single Table - -``` sql -> RESTORE bank.customers FROM 'azure://acme-co-backup/table-customer-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co'; -``` - -### Restore Multiple Tables - -``` sql -> RESTORE bank.customers, accounts FROM 'azure://acme-co-backup/tables-accounts-customers-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co'; -``` - -### Restore All Tables and Views from a Database - -``` sql -> RESTORE bank.* FROM 'azure://acme-co-backup/database-bank-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co'; -``` - -### Restore from Incremental Backups - -``` sql -> RESTORE bank.customers FROM 'azure://acme-co-backup/database-bank-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -, 'azure://acme-co-backup/database-bank-2017-03-28-incremental?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -, 'azure://acme-co-backup/database-bank-2017-03-29-incremental?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co'; -``` - -### Restore into a Different Database - -By default, tables and views are restored to the database they originally belonged to. However, using the [`into_db`](#into_db) option, you can control the target database. - -~~~ sql -> RESTORE bank.customers FROM 'azure://acme-co-backup/table-customer-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -WITH OPTIONS ('into_db' = 'newdb'); -~~~ - -### Remove the Foreign Key Before Restore - -By default, tables with [Foreign Key](foreign-key.html) constraints must be restored at the same time as the tables they reference. However, using the [`skip_missing_foreign_keys`](#skip_missing_foreign_keys) option you can remove the Foreign Key constraint from the table and then restore it. 
- -~~~ sql -> RESTORE bank.accounts FROM 'azure://acme-co-backup/table-customer-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -WITH OPTIONS ('skip_missing_foreign_keys'); -~~~ - -### Restoring Users from `system.users` Backup - -Every full backup contains the `system.users` table, which you can use to restore your cluster's usernames and their hashed passwords. However, to restore them, you must restore the `system.users` table into a new database because you cannot drop the existing `system.users` table. - -After it's restored into a new database, you can write the restored `users` table data to the cluster's existing `system.users` table. - -~~~ sql -> RESTORE system.users FROM 'azure://acme-co-backup/table-users-2017-03-27-full?AZURE_ACCOUNT_KEY=hash&AZURE_ACCOUNT_NAME=acme-co' -WITH OPTIONS ('into_db' = 'newdb'); - -> INSERT INTO system.users SELECT * FROM newdb.users; - -> DROP TABLE newdb.users; -~~~ - -## See Also - -- [`BACKUP`](backup.html) -- [Configure Replication Zones](configure-replication-zones.html) diff --git a/src/current/v1.0/revoke.md b/src/current/v1.0/revoke.md deleted file mode 100644 index 159984adf64..00000000000 --- a/src/current/v1.0/revoke.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: REVOKE -summary: The REVOKE statement revokes privileges from users. -toc: true ---- - -The `REVOKE` [statement](sql-statements.html) revokes [privileges](privileges.html) from users. - -For the list of privileges that can be granted to and revoked from users, see [`GRANT`](grant.html). - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/revoke.html %} - -## Required Privileges - -The user revoking privileges must have the `GRANT` privilege on the target databases or tables. - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | The name of the table for which you want to revoke privileges. To revoke privileges for multiple tables, use a comma-separated list of table names. To revoke privileges for all tables, use `*`. -`database_name` | The name of the database for which you want to revoke privileges. To revoke privileges for multiple databases, use a comma-separated list of database names.

    Privileges revoked for databases will be revoked for any new tables created in the databases. -`user_name` | The name of the users from whom you want to revoke privileges. To revoke privileges from multiple users, use a comma-separated list of [users](create-and-manage-users.html). - - -## Examples - -### Revoke Privileges on Databases - -~~~ sql -> SHOW GRANTS ON DATABASE db1, db2; -~~~ - -~~~ -+----------+------------+------------+ -| Database | User | Privileges | -+----------+------------+------------+ -| db1 | betsyroach | CREATE | -| db1 | maxroach | CREATE | -| db1 | root | ALL | -| db2 | betsyroach | CREATE | -| db2 | maxroach | CREATE | -| db2 | root | ALL | -+----------+------------+------------+ -(6 rows) -~~~ - -~~~ sql -> REVOKE CREATE ON DATABASE db1, db2 FROM maxroach, betsyroach; -~~~ - -~~~ sql -> SHOW GRANTS ON DATABASE db1, db2; -~~~ - -~~~ -+----------+------+------------+ -| Database | User | Privileges | -+----------+------+------------+ -| db1 | root | ALL | -| db2 | root | ALL | -+----------+------+------------+ -(2 rows) -~~~ - -{{site.data.alerts.callout_info}} Note that any tables that previously inherited the database-level privileges retain the privileges.{{site.data.alerts.end}} - -### Revoke Privileges on Specific Tables in a Database - -~~~ sql -> SHOW GRANTS ON TABLE db1.t1, db1.t2; -~~~ - -~~~ -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | betsyroach | CREATE | -| t1 | betsyroach | DELETE | -| t1 | maxroach | CREATE | -| t1 | root | ALL | -| t2 | betsyroach | CREATE | -| t2 | betsyroach | DELETE | -| t2 | maxroach | CREATE | -| t2 | root | ALL | -+-------+------------+------------+ -(8 rows) -~~~ - -~~~ sql -> REVOKE CREATE ON TABLE db1.t1, db1,t2 FROM betsyroach; -~~~ - -~~~ sql -> SHOW GRANTS ON TABLE db1.t1, db1.t2; -~~~ - -~~~ -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | betsyroach | DELETE | -| t1 | maxroach | CREATE | -| t1 | root | ALL | -| t2 | betsyroach | DELETE | -| t2 | maxroach | CREATE | -| t2 | root | ALL | -+-------+------------+------------+ -(6 rows) -~~~ - -### Revoke Privileges on All Tables in a Database - -~~~ sql -> SHOW GRANTS ON TABLE db2.t1, db2.t2; -~~~ - -~~~ -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | betsyroach | DELETE | -| t1 | root | ALL | -| t2 | betsyroach | DELETE | -| t2 | root | ALL | -+-------+------------+------------+ -(4 rows) -~~~ - -~~~ sql -> REVOKE DELETE ON db2.* FROM betsyroach; -~~~ - -~~~ -+-------+------+------------+ -| Table | User | Privileges | -+-------+------+------------+ -| t1 | root | ALL | -| t2 | root | ALL | -+-------+------+------------+ -(2 rows) -~~~ - -## See Also - -- [Privileges](privileges.html) -- [`GRANT`](grant.html) -- [`SHOW GRANTS`](show-grants.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/rollback-transaction.md b/src/current/v1.0/rollback-transaction.md deleted file mode 100644 index c604ad6ec84..00000000000 --- a/src/current/v1.0/rollback-transaction.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: ROLLBACK -summary: Abort the current transaction, discarding all updates made by statements included in the transaction with the ROLLBACK statement in CockroachDB. 
-toc: true ---- - -The `ROLLBACK` [statement](sql-statements.html) aborts the current [transaction](transactions.html), discarding all updates made by statements included in the transaction. - -When using [client-side transaction retries](transactions.html#client-side-transaction-retries), use `ROLLBACK TO SAVEPOINT cockroach_restart` to handle a transaction that needs to be retried (identified via the `40001` error code or `retry transaction` string in the error message), and then re-execute the statements you want the transaction to contain. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/rollback_transaction.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to rollback a transaction. However, privileges are required for each statement within a transaction. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `TO SAVEPOINT cockroach_restart` | If using [client-side transaction retries](transactions.html#client-side-transaction-retries), retry the transaction. You should execute this statement when a transaction returns a `40001` / `retry transaction` error. | - -## Example - -### Rollback a Transaction - -Typically, your application conditionally executes rollbacks, but you can see their behavior by using `ROLLBACK` instead of `COMMIT` directly through SQL. - -~~~ sql -> SELECT * FROM accounts; -~~~ -~~~ -+----------+---------+ -| name | balance | -+----------+---------+ -| Marciela | 1000 | -+----------+---------+ -~~~ -~~~ sql -> BEGIN; - -> UPDATE accounts SET balance = 2500 WHERE name = 'Marciela'; - -> ROLLBACK; - -> SELECT * FROM accounts; -~~~ -~~~ -+----------+---------+ -| name | balance | -+----------+---------+ -| Marciela | 1000 | -+----------+---------+ -~~~ - -### Retry a Transaction - -To use [client-side transaction retries](transactions.html#client-side-transaction-retries), your application must execute `ROLLBACK TO SAVEPOINT cockroach_restart` after detecting a `40001` / `retry transaction` error. - -~~~ sql -> ROLLBACK TO SAVEPOINT cockroach_restart; -~~~ - -For examples of retrying transactions in your application, check out the transaction code samples in our [Build an App with CockroachDB](build-an-app-with-cockroachdb.html) tutorials. - -## See Also - -- [Transactions](transactions.html) -- [`BEGIN`](begin-transaction.html) -- [`COMMIT`](commit-transaction.html) -- [`SAVEPOINT`](savepoint.html) -- [`RELEASE SAVEPOINT`](release-savepoint.html) diff --git a/src/current/v1.0/savepoint.md b/src/current/v1.0/savepoint.md deleted file mode 100644 index fb50dac988d..00000000000 --- a/src/current/v1.0/savepoint.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: SAVEPOINT -summary: Identify your intent to retry aborted transactions with the SAVEPOINT cockroach_restart statement in CockroachDB. -toc: true ---- - -The `SAVEPOINT cockroach_restart` statement defines the intent to retry [transactions](transactions.html) using the CockroachDB-provided function for client-side transaction retries. For more information, see [Transaction Retries](transactions.html#transaction-retries). - -{{site.data.alerts.callout_danger}}CockroachDB’s SAVEPOINT implementation only supports the cockroach_restart savepoint and does not support all savepoint functionality, such as nested transactions.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/savepoint.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to create a savepoint. 
However, privileges are required for each statement within a transaction. - -## Example - -### Create Savepoint - -After you `BEGIN` the transaction, you must create the savepoint to identify that if the transaction contends with another transaction for resources and "loses", you intend to use [the function for client-side transaction retries](transactions.html#transaction-retries). - -~~~ sql -> BEGIN; - -> SAVEPOINT cockroach_restart; - -> UPDATE products SET inventory = 0 WHERE sku = '8675309'; - -> INSERT INTO orders (customer, sku, status) VALUES (1001, '8675309', 'new'); - -> RELEASE SAVEPOINT cockroach_restart; - -> COMMIT; -~~~ - -When using `SAVEPOINT`, your application must also include functions to execute retries with [`ROLLBACK TO SAVEPOINT cockroach_restart`](rollback-transaction.html#retry-a-transaction). - -## See Also - -- [Transactions](transactions.html) -- [`RELEASE SAVEPOINT`](release-savepoint.html) -- [`ROLLBACK`](rollback-transaction.html) -- [`BEGIN`](begin-transaction.html) -- [`COMMIT`](commit-transaction.html) diff --git a/src/current/v1.0/secure-a-cluster.md b/src/current/v1.0/secure-a-cluster.md deleted file mode 100644 index 1a29463d508..00000000000 --- a/src/current/v1.0/secure-a-cluster.md +++ /dev/null @@ -1,300 +0,0 @@ ---- -title: Start a Local Cluster (Secure) -summary: Run a secure multi-node CockroachDB cluster locally, using TLS certificates to encrypt network communication. -toc: true ---- - - - -Once you’ve [installed CockroachDB](install-cockroachdb.html), it’s simple to start a secure multi-node cluster locally, using [TLS certificates](create-security-certificates.html) to encrypt network communication. - -{{site.data.alerts.callout_info}}Running multiple nodes on a single host is useful for testing out CockroachDB, but it's not recommended for production deployments. To run a physically distributed cluster in production, see Manual Deployment, Cloud Deployment, or Orchestration.{{site.data.alerts.end}} - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Create security certificates - -~~~ shell -# Create a certs directory and safe directory for the CA key. -# If using the default certificate directory (`${HOME}/.cockroach-certs`), make sure it is empty. -$ mkdir certs -$ mkdir my-safe-directory - -# Create the CA key pair: -$ cockroach cert create-ca \ ---certs-dir=certs \ ---ca-key=my-safe-directory/ca.key - -# Create a client key pair for the root user: -$ cockroach cert create-client \ -root \ ---certs-dir=certs \ ---ca-key=my-safe-directory/ca.key - -# Create a key pair for the nodes: -$ cockroach cert create-node \ -localhost \ -$(hostname) \ ---certs-dir=certs \ ---ca-key=my-safe-directory/ca.key -~~~ - -- The first command makes a new directory for the certificates. -- The second command creates the Certificate Authority (CA) certificate and key: `ca.crt` and `ca.key`. -- The third command creates the client certificate and key, in this case for the `root` user: `client.root.crt` and `client.root.key`. These files will be used to secure communication between the built-in SQL shell and the cluster (see step 4). -- The fourth command creates the node certificate and key: `node.crt` and `node.key`. These files will be used to secure communication between nodes. 
Typically, you would generate these separately for each node since each node has unique addresses; in this case, however, since all nodes will be running locally, you need to generate only one node certificate and key. - -## Step 2. Start the first node - -~~~ shell -$ cockroach start \ ---certs-dir=certs \ ---host=localhost \ ---http-host=localhost -~~~ - -~~~ -CockroachDB node starting at {{ now | date: "%Y-%m-%d %H:%M:%S.%6 +0000 UTC" }} -build: CCL {{page.release_info.version}} @ {{page.release_info.build_time}} -admin: https://ROACHs-MBP:8080 -sql: postgresql://root@ROACHs-MBP:26257?sslcert=%2FUsers%2F... -logs: cockroach-data/logs -store[0]: path=cockroach-data -status: restarted pre-existing node -clusterID: {dab8130a-d20b-4753-85ba-14d8956a294c} -nodeID: 1 -~~~ - -This command starts a node in secure mode, accepting most [`cockroach start`](start-a-node.html) defaults. - -- The `--certs-dir` directory points to the directory holding certificates and keys. -- Since this is a purely local cluster, `--host=localhost` tells the node to listens only on `localhost`, with default ports used for internal and client traffic (`26257`) and for HTTP requests from the Admin UI (`8080`). -- The Admin UI defaults to listening on all interfaces. The `--http-host` flag is therefore used to restrict Admin UI access to the specified interface, in this case, `localhost`. -- Node data is stored in the `cockroach-data` directory. -- The [standard output](start-a-node.html#standard-output) gives you helpful details such as the CockroachDB version, the URL for the admin UI, and the SQL URL for clients. - -{{site.data.alerts.callout_success}}By default, each node's cache is limited to 25% of available memory. This default is reasonable when running one node per host. When you run multiple nodes on a single host, however, this default may lead to out-of-memory errors, especially if you test in a serious way. To avoid such errors, you can limit each node's cache size by setting the --cache flag in the start command.{{site.data.alerts.end}} - -## Step 3. Add nodes to the cluster - -At this point, your cluster is live and operational. With just one node, you can already connect a SQL client and start building out your database. In real deployments, however, you'll always want 3 or more nodes to take advantage of CockroachDB's [automatic replication](demo-data-replication.html), [rebalancing](demo-automatic-rebalancing.html), and [fault tolerance](demo-fault-tolerance-and-recovery.html) capabilities. This step helps you simulate a real deployment locally. - -In a new terminal, add the second node: - -~~~ shell -$ cockroach start \ ---certs-dir=certs \ ---store=node2 \ ---host=localhost \ ---port=26258 \ ---http-port=8081 \ ---http-host=localhost \ ---join=localhost:26257 -~~~ - -In a new terminal, add the third node: - -~~~ shell -$ cockroach start \ ---certs-dir=certs \ ---store=node3 \ ---host=localhost \ ---port=26259 \ ---http-port=8082 \ ---http-host=localhost \ ---join=localhost:26257 -~~~ - -The main difference in these commands is that you use the `--join` flag to connect the new nodes to the cluster, specifying the address and port of the first node, in this case `localhost:26257`. Since you're running all nodes on the same machine, you also set the `--store`, `--port`, and `--http-port` flags to locations and ports not used by other nodes, but in a real deployment, with each node on a different machine, the defaults would suffice. - -## Step 4. 
Test the cluster - -Now that you've scaled to 3 nodes, you can use any node as a SQL gateway to the cluster. To demonstrate this, open a new terminal and connect the [built-in SQL client](use-the-built-in-sql-client.html) to node 1: - -{{site.data.alerts.callout_info}}The SQL client is built into the cockroach binary, so nothing extra is needed.{{site.data.alerts.end}} - -~~~ shell -$ cockroach sql \ ---certs-dir=certs -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -Run some basic [CockroachDB SQL statements](learn-cockroachdb-sql.html): - -~~~ sql -> CREATE DATABASE bank; - -> CREATE TABLE bank.accounts (id INT PRIMARY KEY, balance DECIMAL); - -> INSERT INTO bank.accounts VALUES (1, 1000.50); - -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -Exit the SQL shell on node 1: - -~~~ sql -> \q -~~~ - -Then connect the SQL shell to node 2, this time specifying the node's non-default port: - -~~~ shell -$ cockroach sql \ ---certs-dir=certs \ ---port=26258 -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. -~~~ - -{{site.data.alerts.callout_info}}In a real deployment, all nodes would likely use the default port 26257, and so you wouldn't need to set the --port flag.{{site.data.alerts.end}} - -Now run the same `SELECT` query: - -~~~ sql -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -As you can see, node 1 and node 2 behaved identically as SQL gateways. - -Exit the SQL shell on node 2: - -~~~ sql -> \q -~~~ - -## Step 5. Monitor the cluster - -To access the [Admin UI](explore-the-admin-ui.html) for your cluster, point a browser to `https://localhost:8080`, or to the address in the `admin` field in the standard output of any node on startup. - -Note that your browser will consider the CockroachDB-created certificate invalid; you’ll need to click through a warning message to get to the UI. - -CockroachDB Admin UI - -As mentioned earlier, CockroachDB automatically replicates your data behind-the-scenes. To verify that data written in the previous step was replicated successfully, scroll down to the **Replicas per Node** graph and hover over the line: - -CockroachDB Admin UI - -The replica count on each node is identical, indicating that all data in the cluster was replicated 3 times (the default). - -{{site.data.alerts.callout_success}}For more insight into how CockroachDB automatically replicates and rebalances data, and tolerates and recovers from failures, see our replication, rebalancing, fault tolerance demos.{{site.data.alerts.end}} - -## Step 6. Stop the cluster - -Once you're done with your test cluster, switch to the terminal running the first node and press **CTRL-C** to stop the node. - -At this point, with 2 nodes still online, the cluster remains operational because a majority of replicas are available. To verify that the cluster has tolerated this "failure", connect the built-in SQL shell to nodes 2 or 3. You can do this in the same terminal or in a new terminal. - -~~~ shell -$ cockroach sql \ ---certs-dir=certs \ ---port=26258 -# Welcome to the cockroach SQL interface. -# All statements must be terminated by a semicolon. -# To exit: CTRL + D. 
-~~~ - -~~~ sql -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -Exit the SQL shell: - -~~~ sql -> \q -~~~ - -Now stop nodes 2 and 3 by switching to their terminals and pressing **CTRL-C**. - -{{site.data.alerts.callout_success}}For node 3, the shutdown process will take longer (about a minute) and will eventually force stop the node. This is because, with only 1 of 3 nodes left, a majority of replicas are not available, and so the cluster is no longer operational. To speed up the process, press CTRL-C a second time.{{site.data.alerts.end}} - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores: - -~~~ shell -$ rm -rf cockroach-data node2 node3 -~~~ - -## Step 7. Restart the cluster - -If you decide to use the cluster for further testing, you'll need to restart at least 2 of your 3 nodes from the directories containing the nodes' data stores. - -Restart the first node from the parent directory of `cockroach-data/`: - -~~~ shell -$ cockroach start \ ---certs-dir=certs \ ---host=localhost \ ---http-host=localhost -~~~ - -{{site.data.alerts.callout_info}}With only 1 node back online, the cluster will not yet be operational, so you will not see a response to the above command until after you restart the second node. -{{site.data.alerts.end}} - -In a new terminal, restart the second node from the parent directory of `node2/`: - -~~~ shell -$ cockroach start \ ---certs-dir=certs \ ---store=node2 \ ---host=localhost \ ---port=26258 \ ---http-port=8081 \ ---http-host=localhost \ ---join=localhost:26257 -~~~ - -In a new terminal, restart the third node from the parent directory of `node3/`: - -~~~ shell -$ cockroach start \ ---certs-dir=certs \ ---store=node3 \ ---host=localhost \ ---port=26259 \ ---http-port=8082 \ ---http-host=localhost \ ---join=localhost:26257 -~~~ - -## What's Next? - -- Learn more about [CockroachDB SQL](learn-cockroachdb-sql.html) and the [built-in SQL client](use-the-built-in-sql-client.html) -- [Install the client driver](install-client-drivers.html) for your preferred language -- [Build an app with CockroachDB](build-an-app-with-cockroachdb.html) -- [Explore core CockroachDB features](demo-data-replication.html) like automatic replication, rebalancing, and fault tolerance diff --git a/src/current/v1.0/select.md b/src/current/v1.0/select.md deleted file mode 100644 index e1ce7dc9553..00000000000 --- a/src/current/v1.0/select.md +++ /dev/null @@ -1,629 +0,0 @@ ---- -title: SELECT -summary: The SELECT statement retrieves data from a table. -toc: true ---- - -The `SELECT` [statement](sql-statements.html) retrieves data from a table. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/select.html %} - -{{site.data.alerts.callout_success}}SELECT also has other applications not covered here, such as executing functions like SELECT current_timestamp();.{{site.data.alerts.end}} - -## Required Privileges - -The user must have the `SELECT` [privilege](privileges.html) on the table. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `DISTINCT` | Retrieve no more than one copy of a value. | -| `target_elem` | The name of the column you want select (`*` to retrieve all columns), the [aggregate function](functions-and-operators.html#aggregate-functions) you want to perform, or the [value expression](sql-expressions.html) you want to use. 
| -| `AS col_label` | In the retrieved table, change the column label to `col_label`. | -| `table_ref` | The [table expression](table-expressions.html) you want to retrieve data from.| -| `index_name` | The name of the index you want to use, also known as "[index hints](#force-index-selection-index-hints)." Find index names using [`SHOW INDEX`](show-index.html).

    Forced index selection overrides [CockroachDB's index selection](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/). | -| `AS OF SYSTEM TIME timestamp` | Retrieve data as it existed as of [`timestamp`](as-of-system-time.html). For more information, see [this example](#select-historical-data-time-travel). | -| `WHERE a_expr` | Only retrieve rows that return `TRUE` for `a_expr`, which must be an expression that returns Boolean values using columns (e.g., `<column> = <value>`). | -| `GROUP BY expr_list` | When using [aggregate functions](functions-and-operators.html#aggregate-functions) in `target_elem` or `HAVING`, list the column groupings in `expr_list`. | -| `HAVING a_expr` | Only retrieve aggregate function groups that return `TRUE` for `a_expr`, which must be an expression that returns Boolean values using an aggregate function (e.g., `<aggregate function> = <value>`).

    `HAVING` works like the `WHERE` clause, but for aggregate functions. | -| `UNION` | Combine the retrieved rows from the preceding and following `SELECT` statements. Returns distinct values.| -| `INTERSECT` | Only retrieve rows that exist in both the preceding and following `SELECT` statements. Returns distinct values. | -| `EXCEPT` | Only retrieve rows that are in the preceding `SELECT` statement but not in the following `SELECT` statement. Returns distinct values.| -| `ALL` | Include duplicate rows in the returned values of `UNION`, `INTERSECT`, or `EXCEPT`. | -| `ORDER BY sortby_list` | Sort retrieved rows in the order of comma-separated column names you include in `sortby_list`. You can optionally specify `ASC` or `DESC` order for each column.

    When ORDER BY is not included in a query, rows are not sorted by any consistent criteria. Instead, CockroachDB returns them as the coordinating node receives them. | -| `LIMIT limit_val` | Only retrieve `limit_val` number of rows. | -| `OFFSET offset_val` | Do not include the first `offset_val` number of rows.

    `OFFSET` is often used in conjunction with `LIMIT` to "paginate" through retrieved rows. | - -## Examples - -### Choose Columns - -#### Retrieve Specific Columns - -Retrieve specific columns by naming them in a comma-separated list. - -~~~ sql -> SELECT id, name, balance -FROM accounts; -~~~ -~~~ -+----+-----------------------+---------+ -| id | name | balance | -+----+-----------------------+---------+ -| 1 | Bjorn Fairclough | 1200 | -| 2 | Bjorn Fairclough | 2500 | -| 3 | Arturo Nevin | 250 | -[ truncated ] -+----+-----------------------+---------+ -~~~ - -#### Retrieve All Columns - -Retrieve all columns by using `*`. - -~~~ sql -> SELECT * -FROM accounts; -~~~ -~~~ -+----+-----------------------+---------+----------+--------------+ -| id | name | balance | type | state_opened | -+----+-----------------------+---------+----------+--------------+ -| 1 | Bjorn Fairclough | 1200 | checking | AL | -| 2 | Bjorn Fairclough | 2500 | savings | AL | -| 3 | Arturo Nevin | 250 | checking | AK | -[ truncated ] -+----+-----------------------+---------+----------+--------------+ -~~~ - -### Filter Rows - -#### Filter on a Single Condition - -Filter rows with expressions that use columns and return Boolean values in the `WHERE` clause. - -~~~ sql -> SELECT name, balance -FROM accounts -WHERE balance < 300; -~~~ -~~~ -+------------------+---------+ -| name | balance | -+------------------+---------+ -| Arturo Nevin | 250 | -| Akbar Jinks | 250 | -| Andrea Maas | 250 | -+------------------+---------+ -~~~ - -#### Filter on Multiple Conditions - -To use multiple `WHERE` filters join them with `AND` or `OR`. You can also create negative filters with `NOT`. - -~~~ sql -> SELECT * -FROM accounts -WHERE balance > 2500 AND NOT type = 'checking'; -~~~ -~~~ -+----+-------------------+---------+---------+--------------+ -| id | name | balance | type | state_opened | -+----+-------------------+---------+---------+--------------+ -| 4 | Tullia Romijnders | 3000 | savings | AK | -| 62 | Ruarc Mathews | 3000 | savings | OK | -+----+-------------------+---------+---------+--------------+ -~~~ - -#### Select Distinct Rows - -Columns without the [Primary Key](primary-key.html) or [Unique](unique.html) constraints can have multiple instances of the same value. - -~~~ sql -> SELECT name -FROM accounts -WHERE state_opened = 'VT'; -~~~ -~~~ -+----------------+ -| name | -+----------------+ -| Sibylla Malone | -| Sibylla Malone | -+----------------+ -~~~ - -Using `DISTINCT`, you can remove all but one instance of duplicate values from your retrieved data. - -~~~ sql -> SELECT DISTINCT name -FROM accounts -WHERE state_opened = 'VT'; -~~~ -~~~ -+----------------+ -| name | -+----------------+ -| Sibylla Malone | -+----------------+ -~~~ - -#### Filter Values with a List - -Using `WHERE IN ()` performs an `OR` search for listed values in the specified column. - -~~~ sql -> SELECT name, balance, state_opened -FROM accounts -WHERE state_opened IN ('AZ', 'NY', 'WA'); -~~~ -~~~ -+-----------------+---------+--------------+ -| name | balance | state_opened | -+-----------------+---------+--------------+ -| Naseem Joossens | 300 | AZ | -| Aygün Sanna | 900 | NY | -| Carola Dahl | 800 | NY | -| Edna Barath | 750 | WA | -| Edna Barath | 2200 | WA | -+-----------------+---------+--------------+ -~~~ - -### Rename Columns in Output - -Instead of outputting a column's name in the retrieved table, you can change its label using `AS`. 
- -~~~ sql -> SELECT name AS NY_accounts, balance -FROM accounts -WHERE state_opened = 'NY'; -~~~ -~~~ -+-------------+---------+ -| NY_accounts | balance | -+-------------+---------+ -| Aygün Sanna | 900 | -| Carola Dahl | 800 | -+-------------+---------+ -~~~ - -This *does not* change the name of the column in the table. To do that, use [`RENAME COLUMN`](rename-column.html). - -### Search for String Values - -Search for partial [string](string.html) matches in columns using `LIKE`, which supports the following wildcard operators: - -- `%` matches 0 or more characters -- `_` matches exactly 1 character - -~~~ sql -> SELECT id, name, type -FROM accounts -WHERE name LIKE 'Anni%'; -~~~ -~~~ -+----+----------------+----------+ -| id | name | type | -+----+----------------+----------+ -| 58 | Annibale Karga | checking | -| 59 | Annibale Karga | savings | -+----+----------------+----------+ -~~~ - -### Aggregate Functions - -[Aggregate functions](functions-and-operators.html#aggregate-functions) perform calculations on retrieved rows. - -#### Perform Aggregate Function on Entire Column - -By using an aggregate function as a `target_elem`, you can perform the calculation on the entire column. - -~~~sql -> SELECT MIN(balance) -FROM accounts; -~~~ -~~~ -+--------------+ -| MIN(balance) | -+--------------+ -| 250 | -+--------------+ -~~~ - -You can also use the retrieved value as part of an expression. For example, you can use the result in the `WHERE` clause to select additional rows that were not part of the aggregate function itself. - -~~~ sql -> SELECT id, name, balance -FROM accounts -WHERE balance = ( - SELECT - MIN(balance) - FROM accounts -); -~~~ -~~~ -+----+------------------+---------+ -| id | name | balance | -+----+------------------+---------+ -| 3 | Arturo Nevin | 250 | -| 10 | Henrik Brankovic | 250 | -| 26 | Odalys Ziemniak | 250 | -| 35 | Vayu Soun | 250 | -+----+------------------+---------+ -~~~ - -#### Perform Aggregate Function on Retrieved Rows - -By filtering the statement, you can perform the calculation only on retrieved rows. - -~~~sql -> SELECT SUM(balance) -FROM accounts -WHERE state_opened IN ('AZ', 'NY', 'WA'); -~~~ -~~~ -+--------------+ -| SUM(balance) | -+--------------+ -| 4950 | -+--------------+ -~~~ - -#### Filter Columns Fed into Aggregate Functions - -You can use `FILTER (WHERE )` in the `target_elem` to filter which rows are processed by an aggregate function; those that return `FALSE` or `NULL` for the `FILTER` clause's Boolean expression are not fed into the aggregate function. - -~~~ sql -> SELECT count(*) AS unfiltered, count(*) FILTER (WHERE balance > 1500) AS filtered FROM accounts; -~~~ -~~~ -+------------+----------+ -| unfiltered | filtered | -+------------+----------+ -| 84 | 14 | -+------------+----------+ -~~~ - -#### Create Aggregate Groups - -Instead of performing aggregate functions on an the entire set of retrieved rows, you can split the rows into groups and then perform the aggregate function on each of them. - -When creating aggregate groups, each column used as a `target_elem` must be included in `GROUP BY`. 
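As a quick sketch of this rule (illustrative only; output not shown), a query that selects a bare column that is neither grouped nor aggregated is rejected, while grouping that column is accepted. The valid example below then aggregates per state.

~~~ sql
-- Rejected: name is neither listed in GROUP BY nor wrapped in an aggregate function.
> SELECT state_opened, name, SUM(balance)
FROM accounts
GROUP BY state_opened;

-- Accepted: every bare column in the target list also appears in GROUP BY.
> SELECT state_opened, name, SUM(balance)
FROM accounts
GROUP BY state_opened, name;
~~~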
- -~~~ sql -> SELECT state_opened AS state, SUM(balance) AS state_balance -FROM accounts -WHERE state_opened IN ('AZ', 'NY', 'WA') -GROUP BY state_opened; -~~~ -~~~ -+-------+---------------+ -| state | state_balance | -+-------+---------------+ -| AZ | 300 | -| NY | 1700 | -| WA | 2950 | -+-------+---------------+ -~~~ - -#### Filter Aggregate Groups - -To filter aggregate groups, use `HAVING`, the aggregate-group equivalent of the `WHERE` clause; its expression must evaluate to a Boolean value. - -~~~ sql -> SELECT state_opened, AVG(balance) as avg -FROM accounts -GROUP BY state_opened -HAVING AVG(balance) BETWEEN 1700 AND 50000; -~~~ -~~~ -+--------------+---------+ -| state_opened | avg | -+--------------+---------+ -| AR | 3700.00 | -| UT | 1750.00 | -| OH | 2500.00 | -| AL | 1850.00 | -+--------------+---------+ -~~~ - -#### Use Aggregate Functions in Having Clause - -Aggregate functions can also be used in the `HAVING` clause without needing to be included as a `target_elem`. - -~~~ sql -> SELECT name, state_opened -FROM accounts -WHERE state_opened in ('LA', 'MO') -GROUP BY name, state_opened -HAVING COUNT(name) > 1; -~~~ -~~~ -+----------------+--------------+ -| name | state_opened | -+----------------+--------------+ -| Yehoshua Kleid | MO | -+----------------+--------------+ -~~~ - -### Combine Multiple Selects (Union, Intersect, Except) - -SQL lets you compare the results of multiple `SELECT` statements. You can think of each of these clauses as representing a Boolean operator: - -- `UNION` = OR -- `INTERSECT` = AND -- `EXCEPT` = NOT - -By default, each of these comparisons displays only one copy of each value (similar to `SELECT DISTINCT`). However, each function also lets you add an `ALL` to the clause to display duplicate values. - -#### Union: Combine Two Queries - -`UNION` combines the results of two `SELECT` queries into one result. - -~~~ sql -> SELECT name -FROM accounts -WHERE state_opened IN ('AZ', 'NY') -UNION -SELECT name -FROM mortgages -WHERE state_opened IN ('AZ', 'NY'); -~~~ -~~~ -+-----------------+ -| name | -+-----------------+ -| Naseem Joossens | -| Ricarda Caron | -| Carola Dahl | -| Aygün Sanna | -+-----------------+ -~~~ - -To show duplicate rows, you can use `ALL`. - -~~~ sql -> SELECT name -FROM accounts -WHERE state_opened IN ('AZ', 'NY') -UNION ALL -SELECT name -FROM mortgages -WHERE state_opened IN ('AZ', 'NY'); -~~~ -~~~ -+-----------------+ -| name | -+-----------------+ -| Naseem Joossens | -| Ricarda Caron | -| Carola Dahl | -| Naseem Joossens | -| Aygün Sanna | -| Carola Dahl | -+-----------------+ -~~~ - -#### Intersect: Retrieve Intersection of Two Queries - -`INTERSECT` finds only values that are present in both `SELECT` queries. - -~~~ sql -> SELECT name -FROM accounts -WHERE state_opened IN ('NJ', 'VA') -INTERSECT -SELECT name -FROM mortgages; -~~~ -~~~ -+-----------------+ -| name | -+-----------------+ -| Danijel Whinery | -| Agar Archer | -+-----------------+ -~~~ - -#### Except: Exclude One Query's Results from Another - -`EXCEPT` finds values that are present in the first `SELECT` statement but not the second. - -~~~ sql -> SELECT name -FROM mortgages -EXCEPT -SELECT name -FROM accounts; -~~~ -~~~ -+------------------+ -| name | -+------------------+ -| Günay García | -| Karla Goddard | -| Cybele Seaver | -+------------------+ -~~~ - -### Sorting Retrieved Values - -You can use an `ORDER BY` clause to sort retrieved rows by one or more columns. 
- -{{site.data.alerts.callout_info}}When ORDER BY is not included in a query, rows are not sorted by any consistent criteria. Instead, CockroachDB returns them as the coordinating node receives them.{{site.data.alerts.end}} - -#### Order Retrieved Rows by One Column - -~~~ sql -> SELECT * -FROM accounts -WHERE balance BETWEEN 350 AND 500 -ORDER BY balance DESC; -~~~ -~~~ -+----+--------------------+---------+----------+--------------+ -| id | name | balance | type | state_opened | -+----+--------------------+---------+----------+--------------+ -| 12 | Raniya Žitnik | 500 | savings | CT | -| 59 | Annibale Karga | 500 | savings | ND | -| 27 | Adelbert Ventura | 500 | checking | IA | -| 86 | Theresa Slaski | 500 | checking | WY | -| 73 | Ruadh Draganov | 500 | checking | TN | -| 16 | Virginia Ruan | 400 | checking | HI | -| 43 | Tahirih Malinowski | 400 | checking | MS | -| 50 | Dusan Mallory | 350 | savings | NV | -+----+--------------------+---------+----------+--------------+ -~~~ - -#### Order Retrieved Rows by Multiple Columns - -Columns are sorted in the order you list them in `sortby_list`. For example, `ORDER BY a, b` sorts the rows by column `a` and then sorts rows with the same `a` value by their column `b` values. - -~~~ sql -> SELECT * -FROM accounts -WHERE balance BETWEEN 350 AND 500 -ORDER BY balance DESC, name ASC; -~~~ -~~~ -+----+--------------------+---------+----------+--------------+ -| id | name | balance | type | state_opened | -+----+--------------------+---------+----------+--------------+ -| 27 | Adelbert Ventura | 500 | checking | IA | -| 59 | Annibale Karga | 500 | savings | ND | -| 12 | Raniya Žitnik | 500 | savings | CT | -| 73 | Ruadh Draganov | 500 | checking | TN | -| 86 | Theresa Slaski | 500 | checking | WY | -| 43 | Tahirih Malinowski | 400 | checking | MS | -| 16 | Virginia Ruan | 400 | checking | HI | -| 50 | Dusan Mallory | 350 | savings | NV | -+----+--------------------+---------+----------+--------------+ -~~~ - -### Control Quantity of Returned Rows - -#### Limit Number of Retrieved Results - -You can reduce the number of results with `LIMIT`. - -~~~ sql -> SELECT id, name -FROM accounts -LIMIT 5; -~~~ -~~~ -+----+------------------+ -| id | name | -+----+------------------+ -| 1 | Bjorn Fairclough | -| 2 | Bjorn Fairclough | -| 3 | Arturo Nevin | -| 4 | Arturo Nevin | -| 5 | Naseem Joossens | -+----+------------------+ -~~~ - -#### Paginate Through Limited Results - -If you want to limit the number of results, but go beyond the initial set, use `OFFSET` to proceed to the next set of results. This is often used to paginate through large tables where not all of the values need to be immediately retrieved. - -~~~ sql -> SELECT id, name -FROM accounts -LIMIT 5 -OFFSET 5; -~~~ -~~~ -+----+------------------+ -| id | name | -+----+------------------+ -| 6 | Juno Studwick | -| 7 | Juno Studwick | -| 8 | Eutychia Roberts | -| 9 | Ricarda Moriarty | -| 10 | Henrik Brankovic | -+----+------------------+ -~~~ - -### Force Index Selection (Index Hints) - -By using "index hints", you can override [CockroachDB's index selection](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/) and use a specific [index](indexes.html) for your `SELECT` statement. 
- -{{site.data.alerts.callout_info}}Index selection can impact performance, but does not change the result of a SELECT statement.{{site.data.alerts.end}} - -~~~ sql -> SHOW INDEXES FROM accounts; -~~~ -~~~ -+----------+-------------------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+----------+-------------------+--------+-----+--------+-----------+---------+----------+ -| accounts | primary | true | 1 | id | ASC | false | false | -| accounts | accounts_name_idx | false | 1 | name | ASC | false | false | -| accounts | accounts_name_idx | false | 2 | id | ASC | false | true | -+----------+-------------------+--------+-----+--------+-----------+---------+----------+ -(3 rows) -~~~ -~~~ sql -> SELECT name, balance -FROM accounts@accounts_name_idx -WHERE name = 'Edna Barath'; -~~~ -~~~ -+-------------+---------+ -| name | balance | -+-------------+---------+ -| Edna Barath | 750 | -| Edna Barath | 2200 | -+-------------+---------+ -~~~ - -### Select Historical Data (Time Travel) - -CockroachDB lets you find data as it was stored at a given point in time using `AS OF SYSTEM TIME` with various [supported formats](as-of-system-time.html). - -{{site.data.alerts.callout_info}}Historical data is available only within the garbage collection window, which is determined by the ttlseconds field in the replication zone configuration.{{site.data.alerts.end}} - -Imagine this example represents the database's current data. - -~~~ sql -> SELECT name, balance -FROM accounts -WHERE name = 'Edna Barath'; -~~~ -~~~ -+-------------+---------+ -| name | balance | -+-------------+---------+ -| Edna Barath | 750 | -| Edna Barath | 2200 | -+-------------+---------+ -~~~ - -We could instead retrieve the values as they were on October 3, 2016 at 12:45 UTC. - -~~~ sql -> SELECT name, balance -FROM accounts -AS OF SYSTEM TIME '2016-10-03 12:45:00' -WHERE name = 'Edna Barath'; -~~~ -~~~ -+-------------+---------+ -| name | balance | -+-------------+---------+ -| Edna Barath | 450 | -| Edna Barath | 2000 | -+-------------+---------+ -~~~ - - -## See Also -- [`INSERT`](insert.html) -- [`UPDATE`](update.html) -- [`CREATE TABLE`](create-table.html) -- [SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/serial.md b/src/current/v1.0/serial.md deleted file mode 100644 index 18d4e90eeda..00000000000 --- a/src/current/v1.0/serial.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: SERIAL -summary: The SERIAL data type defaults to a unique 64-bit signed integer that is the combination of the insert timestamp and the ID of the node. -toc: true ---- - -The `SERIAL` [data type](data-types.html) is a column data type that, on insert, generates a default integer from the timestamp and ID of the node executing the insert. This combination is likely to be globally unique except in extreme cases (see this [example](create-table.html#create-a-table-with-auto-generated-unique-row-ids) for more details). Also, because value generation does not require talking to other nodes, it is much faster than sequentially auto-incrementing a value, which requires distributed coordination. - -{{site.data.alerts.callout_info}} -We believe this data type is a better solution than PostgreSQL's SERIAL and MySQL's AUTO_INCREMENT types, both of which auto-increment integers but not necessarily in a strictly sequential fashion (see the Auto-Incrementing Is Not Always Sequential example below). 
However, if you find that this feature is incompatible with your application, please open an -issue. -{{site.data.alerts.end}} - - -## Aliases - -The `SERIAL` type is equivalent to [`INT DEFAULT unique_rowid()`](int.html). - -In CockroachDB, the following are aliases for `SERIAL`: - -- `SMALLSERIAL` -- `BIGSERIAL` - -## Syntax - -Any `INT` value is a valid `SERIAL` value; in particular, constant `SERIAL` values can be expressed using [numeric literals](sql-constants.html#numeric-literals). - -## Size - -[Same as `INT`](int.html#size). - -## Examples - -### Use `SERIAL` to Auto-Generate Primary Keys - -In this example, we create a table with the `SERIAL` column as the primary key so we can auto-generate unique IDs on insert. - -~~~ sql -> CREATE TABLE serial (a SERIAL PRIMARY KEY, b STRING, c BOOL); -~~~ - -The [`SHOW COLUMNS`](show-columns.html) statement shows that the `SERIAL` type is just an alias for `INT` with `unique_rowid()` as the default. - -~~~ sql -> SHOW COLUMNS FROM serial; -~~~ - -~~~ -+-------+------------+-------+----------------+ -| Field | Type | Null | Default | -+-------+------------+-------+----------------+ -| a | INT | false | unique_rowid() | -| b | STRING | true | NULL | -| c | BOOL | true | NULL | -+-------+------------+-------+----------------+ -~~~ - -When we insert rows without values in column `a` and display the new rows, we see that each row has defaulted to a unique value in column `a`. - -~~~ sql -> INSERT INTO serial (b,c) VALUES ('red', true), ('yellow', false), ('pink', true); -> INSERT INTO serial (a,b,c) VALUES (123, 'white', false); -> SELECT * FROM serial; -~~~ - -~~~ -+--------------------+--------+-------+ -| a | b | c | -+--------------------+--------+-------+ -| 148656994422095873 | red | true | -| 148656994422161409 | yellow | false | -| 148656994422194177 | pink | true | -| 123 | white | false | -+--------------------+--------+-------+ -~~~ - -### Auto-Incrementing Is Not Always Sequential - -It's a common misconception that the auto-incrementing types in PostgreSQL and MySQL generate strictly sequential values. In fact, each insert increases the sequence by one, even when the insert is not committed. This means that auto-incrementing types may leave gaps in a sequence. - -To experience this for yourself, run through the following example in PostgreSQL: - -1. Create a table with a `SERIAL` column. - - ~~~ sql - > CREATE TABLE increment (a SERIAL PRIMARY KEY); - ~~~ - -2. Run four transactions for inserting rows. - - ~~~ sql - > BEGIN; INSERT INTO increment DEFAULT VALUES; ROLLBACK; - > BEGIN; INSERT INTO increment DEFAULT VALUES; COMMIT; - > BEGIN; INSERT INTO increment DEFAULT VALUES; ROLLBACK; - > BEGIN; INSERT INTO increment DEFAULT VALUES; COMMIT; - ~~~ - -3. View the rows created. - - ~~~ sql - > SELECT * from increment; - ~~~ - ~~~ - +---+ - | a | - +---+ - | 2 | - | 4 | - +---+ - ~~~ - - Since each insert increased the sequence in column `a` by one, the first committed insert got the value `2`, and the second committed insert got the value `4`. As you can see, the values aren't strictly sequential, and the last value doesn't give an accurate count of rows in the table. - -In summary, the `SERIAL` type in PostgreSQL and CockroachDB, and the `AUTO_INCREMENT` type in MySQL, all behave the same in that they do not create strict sequences. CockroachDB will likely create more gaps than these other databases, but will generate these values much faster. 
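As a concrete sketch of the alias described above (the table name here is illustrative only), `SERIAL` expands to `INT DEFAULT unique_rowid()`; because `unique_rowid()` derives each default from the insert timestamp and the node ID, the generated values are unique and roughly time-ordered, but neither gap-free nor strictly sequential.

~~~ sql
-- Equivalent to: CREATE TABLE serial_explicit (a SERIAL PRIMARY KEY, b STRING);
> CREATE TABLE serial_explicit (a INT DEFAULT unique_rowid() PRIMARY KEY, b STRING);

-- No sequence counter is involved, so rolled-back inserts do not "use up" values;
-- each default simply encodes when and where the row was inserted.
> INSERT INTO serial_explicit (b) VALUES ('red'), ('yellow');
~~~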
- -## Supported Casting & Conversion - -[Values of type `SERIAL` can be converted to other types like any `INT` values](int.html#supported-casting-conversion). - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/set-cluster-setting.md b/src/current/v1.0/set-cluster-setting.md deleted file mode 100644 index 020d5e5182f..00000000000 --- a/src/current/v1.0/set-cluster-setting.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: SET CLUSTER SETTING -summary: The SET CLUSTER SETTING statement configures one cluster setting. -toc: true ---- - -The `SET CLUSTER SETTING` [statement](sql-statements.html) modifies a [cluster-wide setting](cluster-settings.html). - -{{site.data.alerts.callout_danger}}Many cluster settings are intended for tuning CockroachDB internals. Before changing these settings, we strongly encourage you to discuss your goals with CockroachDB; otherwise, you use them at your own risk.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/set_cluster_setting.html %} - -{{site.data.alerts.callout_info}}The SET CLUSTER SETTING statement is unrelated to the other SET TRANSACTION and SET (session variable) statements.{{site.data.alerts.end}} - -## Required Privileges - -Only the `root` user can modify cluster settings. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `var_name` | See the description of [cluster settings](cluster-settings.html). | - -The variable name is case-insensitive. - -## Examples - -### Change the Default Distributed Execution Parameter - -You can configure a cluster so that new sessions automatically try to run queries [in a distributed fashion](https://www.cockroachlabs.com/blog/local-and-distributed-processing-in-cockroachdb/): - -~~~ sql -> SET CLUSTER SETTING sql.defaults.distsql = 1; -~~~ - -You can also disable distributed execution for all new sessions: - -~~~ sql -> SET CLUSTER SETTING sql.defaults.distsql = 0; -~~~ - -### Disable Automatic Diagnostic Reporting - -You can opt out of -[automatic diagnostic reporting](diagnostics-reporting.html) of usage -data to Cockroach Labs using the following: - -~~~ sql -> SET CLUSTER SETTING diagnostics.reporting.enabled = false; -> SHOW CLUSTER SETTING diagnostics.reporting.enabled; -~~~ - - -~~~ -+-------------------------------+ -| diagnostics.reporting.enabled | -+-------------------------------+ -| false | -+-------------------------------+ -(1 row) -~~~ - -## See Also - -- [`SET` (session variable)](set-vars.html) -- [`SHOW CLUSTER SETTING`](show-cluster-setting.html) -- [Cluster settings](cluster-settings.html) diff --git a/src/current/v1.0/set-transaction.md b/src/current/v1.0/set-transaction.md deleted file mode 100644 index c3c84d4a6f5..00000000000 --- a/src/current/v1.0/set-transaction.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: SET TRANSACTION -summary: The SET TRANSACTION statement sets the transaction isolation level and/or priority for the current session or for an individual transaction. -toc: true ---- - -The `SET TRANSACTION` [statement](sql-statements.html) sets the transaction isolation level or priority after you [`BEGIN`](begin-transaction.html) it but before executing the first statement that manipulates a database. 
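As a minimal sketch of that ordering (the fuller example later on this page also wraps the work in a `SAVEPOINT` for client-side retries), `SET TRANSACTION` sits between `BEGIN` and the transaction's first data statement:

~~~ sql
> BEGIN;

-- Must run before any statement that reads or writes data in this transaction.
> SET TRANSACTION ISOLATION LEVEL SNAPSHOT, PRIORITY HIGH;

> UPDATE products SET inventory = 0 WHERE sku = '8675309';

> COMMIT;
~~~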
- -{{site.data.alerts.callout_info}}You can also set the session's default isolation level.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/set_transaction.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to set the transaction isolation level or priority. However, privileges are required for each statement within a transaction. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `ISOLATION LEVEL` | If you do not want the transaction to run as `SERIALIZABLE` (CockroachDB's default, which provides the highest level of isolation), you can set the isolation level to `SNAPSHOT`, which can provide better performance in high-contention scenarios.

    For more information, see [Transactions: Isolation Levels](transactions.html#isolation-levels).

    **Default**: `SERIALIZABLE` | -| `PRIORITY` | If you do not want the transaction to run with `NORMAL` priority, you can set it to `LOW` or `HIGH`.

    Transactions with higher priority are less likely to need to be retried.

    For more information, see [Transactions: Priorities](transactions.html#transaction-priorities).

    **Default**: `NORMAL` | - -## Examples - -### Set Isolation & Priority - -You can set a transaction's isolation level to `SNAPSHOT`, as well as its priority to `LOW` or `HIGH`. - -~~~ sql -> BEGIN; - -> SET TRANSACTION ISOLATION LEVEL SNAPSHOT, PRIORITY HIGH; - -> SAVEPOINT cockroach_restart; - -> UPDATE products SET inventory = 0 WHERE sku = '8675309'; - -> INSERT INTO orders (customer, sku, status) VALUES (1001, '8675309', 'new'); - -> RELEASE SAVEPOINT cockroach_restart; - -> COMMIT; -~~~ - -{{site.data.alerts.callout_danger}}This example assumes you're using client-side intervention to handle transaction retries.{{site.data.alerts.end}} - -### Set Session's Default Isolation - -You can also set the default isolation level for all transactions in the client's current session using `SET DEFAULT_TRANSACTION_ISOLATION TO `. - -~~~ sql -> SHOW DEFAULT_TRANSACTION_ISOLATION; -~~~ -~~~ -+-------------------------------+ -| default_transaction_isolation | -+-------------------------------+ -| SERIALIZABLE | -+-------------------------------+ -(1 row) -~~~ -~~~ sql -> SET DEFAULT_TRANSACTION_ISOLATION TO SNAPSHOT; -~~~ -~~~ -SET -~~~ -~~~ sql -> SHOW DEFAULT_TRANSACTION_ISOLATION; -~~~ -~~~ -+-------------------------------+ -| default_transaction_isolation | -+-------------------------------+ -| SNAPSHOT | -+-------------------------------+ -(1 row) -~~~ - -## See Also - -- [`SET`](set-vars.html) -- [Transaction parameters](transactions.html#transaction-parameters) -- [`BEGIN`](begin-transaction.html) -- [`COMMIT`](commit-transaction.html) -- [`SAVEPOINT`](savepoint.html) -- [`RELEASE SAVEPOINT`](release-savepoint.html) -- [`ROLLBACK`](rollback-transaction.html) diff --git a/src/current/v1.0/set-vars.md b/src/current/v1.0/set-vars.md deleted file mode 100644 index 828ee807560..00000000000 --- a/src/current/v1.0/set-vars.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: SET (session settings) -summary: The SET statement modifies the current settings for the client session. -toc: true ---- - -The `SET` [statement](sql-statements.html) can modify one of the -session setting variables. These can also be queried via [`SHOW`](show-vars.html). - -{{site.data.alerts.callout_danger}}In some cases, client drivers can drop and restart the connection to the server. When this happens, any session configurations made with SET statements are lost. It is therefore more reliable to configure the session in the client's connection string. For examples in different languages, see the Build an App with CockroachDB tutorials.{{site.data.alerts.end}} - - -## Required Privileges - -No [privileges](privileges.html) are required to modify the session settings. - -## Synopsis - -General syntax: - -{% include {{ page.version.version }}/sql/diagrams/set_var.html %} - -{{site.data.alerts.callout_info}}The SET statement for session settings is unrelated to the other SET TRANSACTION and SET CLUSTER SETTING statements.{{site.data.alerts.end}} - -## Parameters - -The `SET ` statement accepts two parameters: the -variable name and the value to use to modify the variable. - -The variable name is case insensitive. The value can be a list of one or more items. For example, the variable `search_path` is multi-valued. - -### Supported Variables - -| Variable name | Description | Initial value | Can be viewed with [`SHOW`](show-vars.html)? 
| -|---------------------------------|--------------|---------------|----------------------------------------------| -| `application_name` | The current application name for statistics collection. | Empty string. | Yes | -| `database` | The default database for the current session. | Database in connection string, or empty if not specified. | Yes | -| `search_path` | A list of databases or namespaces that will be searched to resolve unqualified table or function names. For more details, see [Name Resolution](sql-name-resolution.html). | "`{pg_catalog}`" (for ORM compatibility). | Yes | -| `time zone` | The default time zone for the current session. See [`SET TIME ZONE` notes](#set-time-zone) below. | `UTC` | Yes | -| `default_transaction_isolation` | The default transaction isolation level for the current session. See [Transaction parameters](transactions.html#transaction-parameters) and [`SET TRANSACTION`](set-transaction.html) for more details. | Settings in connection string, or "`SERIALIZABLE`" if not specified. | Yes | -| `client_encoding` | Ignored; recognized for compatibility with PostgreSQL clients. Only possible value is "`UTF8`". | N/A | No | -| `client_min_messages` | Ignored; recognized for compatibility with PostgreSQL clients. Only possible value is "`on`". | N/A | No | -| `standard_conforming_strings` | Ignored; recognized for compatibility with PostgreSQL clients. | N/A | No | -| `extra_float_digits` | Ignored; recognized for compatibility with PostgreSQL clients. | N/A | No | - -Special syntax cases: - -| Syntax | Equivalent to | -|--------|---------------| -| `SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL ...` | `SET default_transaction_isolation = ...` | -| `SET TIME ZONE ...` | Special syntax because the variable name contains a space. See [`SET TIME ZONE`](#set-time-zone) below. | - - -## Examples - -### Set Simple Variables - -The following demonstrates how `SET` can be used to configure the -default database for the current session: - -~~~ sql -> SET database = bank; -> SHOW database; -~~~ - -~~~ -+----------+ -| database | -+----------+ -| bank | -+----------+ -(1 row) -~~~ - -### Set Variables to Values Containing Spaces - -The following demonstrates how to use quoting to use values containing spaces: - -~~~ sql -> SET database = "database name with spaces"; -> SHOW database; -~~~ - -~~~ -+---------------------------+ -| database | -+---------------------------+ -| database name with spaces | -+---------------------------+ -(1 row) -~~~ - -### Set Variables to a List of Values - -The following demonstrates how to assign a list of values: - -~~~ sql -> SET search_path = mydb, otherdb; -> SHOW search_path; -~~~ - -~~~ -+---------------------------+ -| search_path | -+---------------------------+ -| pg_catalog, mydb, otherdb | -+---------------------------+ -(1 row) -~~~ - -## `SET TIME ZONE` - -{{site.data.alerts.callout_danger}}As a best practice, we recommend not using this setting and avoid setting a session time for your database. We instead recommend converting UTC values to the appropriate time zone on the client side.{{site.data.alerts.end}} - -You can control your client's default time zone for the current session with `SET TIME ZONE`. This will apply a session offset to all [`TIMESTAMP WITH TIME ZONE`](timestamp.html) values. 
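As a sketch of that behavior (the `events` table is hypothetical and not part of this page's examples), the stored value itself never changes; only the offset used to display it follows the session setting:

~~~ sql
> CREATE TABLE events (id INT PRIMARY KEY, occurred_at TIMESTAMP WITH TIME ZONE);

> INSERT INTO events VALUES (1, '2017-03-01 10:00:00+00:00');

-- With the session time zone set to EST, the same row is displayed with a -5:00 offset.
> SET TIME ZONE 'EST';

> SELECT occurred_at FROM events WHERE id = 1;
~~~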
- -{{site.data.alerts.callout_info}}Without a session time zone set via SET TIME ZONE, CockroachDB uses UTC as the default time zone.{{site.data.alerts.end}} - -`SET TIME ZONE` uses a special syntax form to configure the `"time zone"` session parameter because `SET` cannot assign to parameter names containing spaces. - -### Parameters - -The time zone value indicates the time zone for the current session. - -This value can be a string representation of a local system-defined -time zone (e.g., `'EST'`, `'America/New_York'`) or a positive or -negative numeric offset from UTC (e.g., `-7`, `+7`). Also, `DEFAULT`, -`LOCAL`, or `0` sets the session time zone to `UTC`. - -### Example: Set the Default Time Zone via `SET TIME ZONE` - -~~~ sql -> SET TIME ZONE 'EST'; -> SHOW TIME ZONE; -~~~ -~~~ shell -+-----------+ -| time zone | -+-----------+ -| EST | -+-----------+ -(1 row) -~~~ -~~~ sql -> SET TIME ZONE DEFAULT; -> SHOW TIME ZONE; -~~~ -~~~ shell -+-----------+ -| time zone | -+-----------+ -| UTC | -+-----------+ -(1 row) -~~~ - -## See Also - -- [`SET TRANSACTION`](set-transaction.html) -- [`SET CLUSTER SETTING`](set-cluster-setting.html) -- [`SHOW` (session variable)](show-vars.html) -- [The `TIMESTAMP` and `TIMESTAMPTZ` data types.](timestamp.html) diff --git a/src/current/v1.0/show-cluster-setting.md b/src/current/v1.0/show-cluster-setting.md deleted file mode 100644 index e69f9e5376c..00000000000 --- a/src/current/v1.0/show-cluster-setting.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: SHOW CLUSTER SETTING -summary: The SHOW CLUSTER SETTING statement displays the current cluster settings. -toc: true ---- - -The `SHOW CLUSTER SETTING` [statement](sql-statements.html) can -display the value of either one or all of the -[cluster settings](cluster-settings.html). These can also be configured -via [`SET CLUSTER SETTING`](set-cluster-setting.html). - - -## Required Privileges - -No [privileges](privileges.html) are required to display the cluster settings. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_cluster_setting.html %} - -{{site.data.alerts.callout_info}}The SHOW statement for cluster settings is unrelated to the other SHOW statements: SHOW (session variable), SHOW CREATE TABLE, SHOW CREATE VIEW, SHOW USERS, SHOW DATABASES, SHOW COLUMNS, SHOW GRANTS, and SHOW CONSTRAINTS.{{site.data.alerts.end}} - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `any_name` | See the description of [cluster settings](cluster-settings.html). | - -The variable name is case insensitive. - -## Examples - -### Showing the Value of a Single Cluster Setting - -~~~ sql -> SHOW CLUSTER SETTING diagnostics.reporting.enabled; -~~~ - -~~~ -+-------------------------------+ -| diagnostics.reporting.enabled | -+-------------------------------+ -| true | -+-------------------------------+ -(1 row) -~~~ - -~~~ sql -> SHOW CLUSTER SETTING sql.defaults.distsql; -~~~ - -~~~ -+----------------------+ -| sql.defaults.distsql | -+----------------------+ -| 1 | -+----------------------+ -(1 row) -~~~ - -### Showing the Value of All Cluster Settings - -~~~ sql -> SHOW ALL CLUSTER SETTINGS; -~~~ - -~~~ -+-------------------------------+---------------+------+--------------------------------------------------------+ -| name | current_value | type | description | -+-------------------------------+---------------+------+--------------------------------------------------------+ -| diagnostics.reporting.enabled | true | b | enable reporting diagnostic metrics to cockroach labs | -| ... | ... | ... 
| ... | -+-------------------------------+---------------+------+--------------------------------------------------------+ -(24 rows) -~~~ - -## See Also - -- [`SET CLUSTER SETTING`](set-vars.html) -- [Cluster settings](cluster-settings.html) -- [`SHOW` (session variable)](show-vars.html) -- [`SHOW COLUMNS`](show-columns.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) -- [`SHOW CREATE TABLE`](show-create-table.html) -- [`SHOW CREATE VIEW`](show-create-view.html) -- [`SHOW DATABASES`](show-databases.html) -- [`SHOW GRANTS`](show-grants.html) -- [`SHOW INDEX`](show-index.html) -- [`SHOW USERS`](show-users.html) diff --git a/src/current/v1.0/show-columns.md b/src/current/v1.0/show-columns.md deleted file mode 100644 index ec142909238..00000000000 --- a/src/current/v1.0/show-columns.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: SHOW COLUMNS -summary: The SHOW COLUMNS statement shows details about columns in a table, including each column's name, type, default value, and whether or not it's nullable. -toc: true ---- - -The `SHOW COLUMNS` [statement](sql-statements.html) shows details about columns in a table, including each column's name, type, default value, and whether or not it's nullable. - - -## Required Privileges - -The user must have any [privilege](privileges.html) on the target table. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_columns.html %} - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | The name of the table for which to show columns. - -## Response - -The following fields are returned for each column. - -Field | Description -------|------------ -`Field` | The name of the column. -`Type` | The [data type](data-types.html) of the column. -`Null` | Whether or not the column accepts `NULL`. Possible values: `true` or `false`. -`Default` | The default value for the column, or an expression that evaluates to a default value. -`Indices` | The list of [indexes](indexes.html) that the column is involved in, as an array. - -## Example - -~~~ sql -> CREATE TABLE orders ( - id INT PRIMARY KEY DEFAULT unique_rowid(), - date TIMESTAMP NOT NULL, - priority INT DEFAULT 1, - customer_id INT UNIQUE, - status STRING DEFAULT 'open', - CHECK (priority BETWEEN 1 AND 5), - CHECK (status in ('open', 'in progress', 'done', 'cancelled')), - FAMILY (id, date, priority, customer_id, status) -); - -> SHOW COLUMNS FROM orders; -~~~ - -~~~ -+-------------+-----------+-------+----------------+----------------------------------+ -| Field | Type | Null | Default | Indices | -+-------------+-----------+-------+----------------+----------------------------------+ -| id | INT | false | unique_rowid() | {primary,orders_customer_id_key} | -| date | TIMESTAMP | false | NULL | {} | -| priority | INT | true | 1 | {} | -| customer_id | INT | true | NULL | {orders_customer_id_key} | -| status | STRING | true | 'open' | {} | -+-------------+-----------+-------+----------------+----------------------------------+ -(5 rows) -~~~ - -## See Also - -- [`CREATE TABLE`](create-table.html) -- [Information Schema](information-schema.html) -- [Other SQL Statements](sql-statements.html) - diff --git a/src/current/v1.0/show-constraints.md b/src/current/v1.0/show-constraints.md deleted file mode 100644 index 40e1912bb0a..00000000000 --- a/src/current/v1.0/show-constraints.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: SHOW CONSTRAINTS -summary: The SHOW CONSTRAINTS statement lists the constraints on a table. 
-toc: true ---- - -The `SHOW CONSTRAINTS` [statement](sql-statements.html) lists all named [constraints](constraints.html) as well as any unnamed Check constraints on a table. - -{{site.data.alerts.callout_danger}}The SHOW CONSTRAINTS statement is under development; the exact output will continue to change.{{site.data.alerts.end}} - - -## Required Privileges - -The user must have any [privilege](privileges.html) on the target table. - -## Aliases - -`SHOW CONSTRAINT` is an alias for `SHOW CONSTRAINTS`. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_constraints.html %} - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | The name of the table for which to show constraints. - -## Response - -The following fields are returned for each constraint. - -{{site.data.alerts.callout_danger}}The SHOW CONSTRAINTS statement is under development; the exact output will continue to change.{{site.data.alerts.end}} - -Field | Description -------|------------ -`Table` | The name of the table. -`Name` | The name of the constraint. -`Type` | The type of constraint. -`Column(s)` | The columns to which the constraint applies. For [Check constraints](check.html), column information will be in `Details` and this field will be `NULL`. -`Details` | The conditions for a Check constraint. - -## Example - -~~~ sql -> CREATE TABLE orders ( - id INT PRIMARY KEY, - date TIMESTAMP NOT NULL, - priority INT DEFAULT 1, - customer_id INT UNIQUE, - status STRING DEFAULT 'open', - CHECK (priority BETWEEN 1 AND 5), - CHECK (status in ('open', 'in progress', 'done', 'cancelled')), - FAMILY (id, date, priority, customer_id, status) -); - -> SHOW CONSTRAINTS FROM orders; -~~~ -~~~ -+--------+------------------------+-------------+---------------+--------------------------------------------------------+ -| Table | Name | Type | Column(s) | Details | -+--------+------------------------+-------------+---------------+--------------------------------------------------------+ -| orders | | CHECK | NULL | status IN ('open', 'in progress', 'done', 'cancelled') | -| orders | | CHECK | NULL | priority BETWEEN 1 AND 5 | -| orders | orders_customer_id_key | UNIQUE | [customer_id] | NULL | -| orders | primary | PRIMARY KEY | [id] | NULL | -+--------+------------------------+-------------+---------------+--------------------------------------------------------+ -(4 rows) -~~~ - -## See Also - -- [Constraints](constraints.html) -- [`CREATE TABLE`](create-table.html) -- [Information Schema](information-schema.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/show-create-table.md b/src/current/v1.0/show-create-table.md deleted file mode 100644 index f51010b607d..00000000000 --- a/src/current/v1.0/show-create-table.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: SHOW CREATE TABLE -summary: The SHOW CREATE TABLE statement shows the CREATE TABLE statement that would create a carbon copy of the specified table. -toc: true ---- - -The `SHOW CREATE TABLE` [statement](sql-statements.html) shows the `CREATE TABLE` statement that would create a carbon copy of the specified table. - - -## Required Privileges - -The user must have any [privilege](privileges.html) on the target table. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_create_table.html %} - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | The name of the table for which to show the `CREATE TABLE` statement. 
- -## Response - -Field | Description -------|------------ -`Table` | The name of the table. -`CreateTable` | The [`CREATE TABLE`](create-table.html) statement for creating a carbon copy of the specified table. - -## Example - -~~~ sql -> CREATE TABLE orders ( - id INT PRIMARY KEY DEFAULT unique_rowid(), - date TIMESTAMP NOT NULL, - priority INT DEFAULT 1, - customer_id INT UNIQUE, - status STRING DEFAULT 'open', - CHECK (priority BETWEEN 1 AND 5), - CHECK (status in ('open', 'in progress', 'done', 'cancelled')), - FAMILY (id, date, priority, customer_id, status) -); - -> SHOW CREATE TABLE orders; -~~~ -~~~ -+--------+--------------------------------------------------------------------------------------------------+ -| Table | CreateTable | -+--------+--------------------------------------------------------------------------------------------------+ -| orders | CREATE TABLE orders ( | -| | id INT NOT NULL DEFAULT unique_rowid(), | -| | date TIMESTAMP NOT NULL, | -| | priority INT NULL DEFAULT 1, | -| | customer_id INT NULL, | -| | status STRING NULL DEFAULT 'open', | -| | CONSTRAINT "primary" PRIMARY KEY (id), | -| | UNIQUE INDEX orders_customer_id_key (customer_id), | -| | FAMILY fam_0_id_date_priority_customer_id_status (id, date, priority, customer_id, status), | -| | CHECK (priority BETWEEN 1 AND 5), | -| | CHECK (status IN ('open', 'in progress', 'done', 'cancelled')) | -| | ) | -+--------+--------------------------------------------------------------------------------------------------+ -(1 row) -~~~ - -## See Also - -- [`CREATE TABLE`](create-table.html) -- [Information Schema](information-schema.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/show-create-view.md b/src/current/v1.0/show-create-view.md deleted file mode 100644 index 0d233b11667..00000000000 --- a/src/current/v1.0/show-create-view.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: SHOW CREATE VIEW -summary: The SHOW CREATE VIEW statement shows the CREATE VIEW statement that would create a carbon copy of the specified view. -toc: true ---- - -The `SHOW CREATE VIEW` [statement](sql-statements.html) shows the `CREATE VIEW` statement that would create a carbon copy of the specified [view](views.html). - - -## Required Privileges - -The user must have any [privilege](privileges.html) on the target view. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_create_view.html %} - -## Parameters - -Parameter | Description -----------|------------ -`view_name` | The name of the view for which to show the `CREATE VIEW` statement. - -## Response - -Field | Description -------|------------ -`View` | The name of the view. -`CreateView` | The [`CREATE VIEW`](create-view.html) statement for creating a carbon copy of the specified view. 
- -## Examples - -### Show the `CREATE VIEW` statement for a view - -~~~ sql -> SHOW CREATE VIEW bank.user_accounts; -~~~ - -~~~ -+--------------------+---------------------------------------------------------------------------+ -| View | CreateView | -+--------------------+---------------------------------------------------------------------------+ -| bank.user_accounts | CREATE VIEW "bank.user_accounts" AS SELECT type, email FROM bank.accounts | -+--------------------+---------------------------------------------------------------------------+ -(1 row) -~~~ - -### Show just a view's `SELECT` statement - -To get just a view's `SELECT` statement, you can query the `views` table in the built-in `information_schema` database and filter on the view name: - -~~~ sql -> SELECT view_definition - FROM information_schema.views - WHERE table_name = 'user_accounts'; -~~~ - -~~~ -+---------------------------------------+ -| view_definition | -+---------------------------------------+ -| SELECT type, email FROM bank.accounts | -+---------------------------------------+ -(1 row) -~~~ - -## See Also - -- [Views](views.html) -- [`CREATE VIEW`](create-view.html) -- [`ALTER VIEW`](alter-view.html) -- [`DROP VIEW`](drop-view.html) -- [Information Schema](information-schema.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/show-databases.md b/src/current/v1.0/show-databases.md deleted file mode 100644 index 0d0b73edff3..00000000000 --- a/src/current/v1.0/show-databases.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: SHOW DATABASES -summary: The SHOW DATABASES statement lists all database in the CockroachDB cluster. -keywords: reflection -toc: true ---- - -The `SHOW DATABASES` [statement](sql-statements.html) lists all database in the CockroachDB cluster. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_databases.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to list the databases in the CockroachDB cluster. - -## Example - -~~~ sql -> SHOW DATABASES; -~~~ -~~~ -+--------------------+ -| Database | -+--------------------+ -| bank | -| crdb_internal | -| information_schema | -| pg_catalog | -| system | -+--------------------+ -(5 rows) -~~~ - -## See Also - -- [Information Schema](information-schema.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/show-grants.md b/src/current/v1.0/show-grants.md deleted file mode 100644 index 9712944bc1c..00000000000 --- a/src/current/v1.0/show-grants.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: SHOW GRANTS -summary: The SHOW GRANTS statement lists the privileges granted to users. -keywords: reflection -toc: true ---- - -The `SHOW GRANTS` [statement](sql-statements.html) lists the [privileges](privileges.html) granted to users. - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_grants.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to view privileges granted to users. - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | A comma-separated list of table names. Alternately, to list privileges for all tables, use `*`. -`database_name` | A comma-separated list of database names. -`user_name` | An optional, comma-separated list of grantees. 
- -## Examples - -### Show grants on databases - -**Specific database, all users:** - -~~~ sql -> SHOW GRANTS ON DATABASE db2: -~~~ - -~~~ shell -+----------+------------+------------+ -| Database | User | Privileges | -+----------+------------+------------+ -| db2 | betsyroach | CREATE | -| db2 | root | ALL | -+----------+------------+------------+ -(2 rows) -~~~ - -**Specific database, specific user:** - -~~~ sql -> SHOW GRANTS ON DATABASE db2 FOR betsyroach; -~~~ - -~~~ shell -+----------+------------+------------+ -| Database | User | Privileges | -+----------+------------+------------+ -| db2 | betsyroach | CREATE | -+----------+------------+------------+ -(1 row) -~~~ - -### Show grants on tables - -**Specific tables, all users:** - -~~~ sql -> SHOW GRANTS ON TABLE db1.t1, db1.t2*; -~~~ - -~~~ shell -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | betsyroach | DELETE | -| t1 | henryroach | DELETE | -| t1 | maxroach | DELETE | -| t1 | root | ALL | -| t1 | sallyroach | DELETE | -| t2 | betsyroach | DELETE | -| t2 | henryroach | DELETE | -| t2 | maxroach | DELETE | -| t2 | root | ALL | -| t2 | sallyroach | DELETE | -+-------+------------+------------+ -(10 rows) -~~~ - -**Specific tables, specific users:** - -~~~ sql -> SHOW GRANTS ON TABLE db.t1, db.t2 FOR maxroach, betsyroach; -~~~ -~~~ shell -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | betsyroach | DELETE | -| t1 | maxroach | DELETE | -| t2 | betsyroach | DELETE | -| t2 | maxroach | DELETE | -+-------+------------+------------+ -(4 rows) -~~~ - -**All tables, all users:** - -~~~ sql -> SHOW GRANTS ON TABLE db1.*; -~~~ - -~~~ shell -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | betsyroach | DELETE | -| t1 | henryroach | DELETE | -| t1 | maxroach | DELETE | -| t1 | root | ALL | -| t1 | sallyroach | DELETE | -| t2 | betsyroach | DELETE | -| t2 | henryroach | DELETE | -| t2 | maxroach | DELETE | -| t2 | root | ALL | -| t2 | sallyroach | DELETE | -| t3 | root | ALL | -| t4 | maxroach | CREATE | -| t4 | root | ALL | -| t5 | maxroach | CREATE | -| t5 | root | ALL | -+-------+------------+------------+ -(15 rows) -~~~ - -**All tables, specific users:** - -~~~ sql -> SHOW GRANTS ON TABLE db1.* FOR maxroach, betsyroach; -~~~ - -~~~ shell -+-------+------------+------------+ -| Table | User | Privileges | -+-------+------------+------------+ -| t1 | betsyroach | DELETE | -| t1 | maxroach | DELETE | -| t2 | betsyroach | DELETE | -| t2 | maxroach | DELETE | -| t4 | maxroach | CREATE | -| t5 | maxroach | CREATE | -+-------+------------+------------+ -(6 rows) -~~~ - -## See Also - -- [`GRANT`](grant.html) -- [`REVOKE`](revoke.html) -- [Privileges](privileges.html) -- [Information Schema](information-schema.html) - diff --git a/src/current/v1.0/show-index.md b/src/current/v1.0/show-index.md deleted file mode 100644 index 21d07715810..00000000000 --- a/src/current/v1.0/show-index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: SHOW INDEX -summary: The SHOW INDEX statement returns index information for a table. -toc: true ---- - -The `SHOW INDEX` [statement](sql-statements.html) returns index information for a table. - - -## Required Privileges - -The user must have any [privilege](privileges.html) on the target table. 
- -## Aliases - -In CockroachDB, the following are aliases for `SHOW INDEX`: - -- `SHOW INDEXES` -- `SHOW KEYS` - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_index.html %} - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | The name of the table for which you want to show indexes. - -## Response - -The following fields are returned for each column in each index. - -Field | Description -----------|------------ -`Table` | The name of the table. -`Name` | The name of the index. -`Unique` | Whether or not values in the indexed column are unique. Possible values: `true` or `false`. -`Seq` | The position of the column in the index, starting with 1. -`Column` | The indexed column. -`Direction` | How the column is sorted in the index. Possible values: `ASC` or `DESC` for indexed columns; `N/A` for stored columns. -`Storing` | Whether or not the `STORING` clause was used to index the column during [index creation](create-index.html). Possible values: `true` or `false`. -`Implicit` | Whether or not the column is part of the index despite not being explicitly included during [index creation](create-index.html). Possible values: `true` or `false`

    At this time, [primary key](primary-key.html) columns are the only columns that get implicitly included in secondary indexes. The inclusion of primary key columns improves performance when retrieving columns not in the index. - -## Examples - -~~~ sql -> CREATE TABLE t1 ( - a INT PRIMARY KEY, - b DECIMAL, - c TIMESTAMP, - d STRING - ); - -> CREATE INDEX b_c_idx ON t1 (b, c) STORING (d); - -> SHOW INDEX FROM t1; -~~~ - -~~~ -+-------+---------+--------+-----+--------+-----------+---------+----------+ -| Table | Name | Unique | Seq | Column | Direction | Storing | Implicit | -+-------+---------+--------+-----+--------+-----------+---------+----------+ -| t1 | primary | true | 1 | a | ASC | false | false | -| t1 | b_c_idx | false | 1 | b | ASC | false | false | -| t1 | b_c_idx | false | 2 | c | ASC | false | false | -| t1 | b_c_idx | false | 3 | d | N/A | true | false | -| t1 | b_c_idx | false | 4 | a | ASC | false | true | -+-------+---------+--------+-----+--------+-----------+---------+----------+ -(5 rows) -~~~ - -## See Also - -- [`CREATE INDEX`](create-index.html) -- [`DROP INDEX`](drop-index.html) -- [`RENAME INDEX`](rename-index.html) -- [Information Schema](information-schema.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/show-tables.md b/src/current/v1.0/show-tables.md deleted file mode 100644 index 7757a675f03..00000000000 --- a/src/current/v1.0/show-tables.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: SHOW TABLES -summary: The SHOW TABLES statement lists the tables in a database. -keywords: reflection -toc: true ---- - -The `SHOW TABLES` [statement](sql-statements.html) lists the tables in a database. Tables can be standard tables as well as virtual tables representing [views](views.html). - -{{site.data.alerts.callout_info}}While a table is being dropped, SHOW TABLES will list the table with a (dropped) suffix.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_tables.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to list the tables in a database. - -## Parameters - -Parameter | Description -----------|------------ -`name` | The name of the database for which to show tables. This is optional when showing tables in the default database. - -## Examples - -### Show tables in the default database - -This example assumes that the `bank` database has been set as the default database for the session, either via the [`SET`](set-vars.html) statement or in the client's connection string. - -~~~ sql -> SHOW TABLES; -~~~ - -~~~ -+---------------+ -| Table | -+---------------+ -| accounts | -| user_accounts | -+---------------+ -(2 rows) -~~~ - -### Show tables in a non-default database - -~~~ sql -> SHOW TABLES FROM startrek; -~~~ - -~~~ -+-------------------+ -| Table | -+-------------------+ -| episodes | -| quotes | -| quotes_per_season | -+-------------------+ -(3 rows) -~~~ - -## See Also - -- [`CREATE TABLE`](create-table.html) -- [`CREATE VIEW`](create-view.html) -- [Information Schema](information-schema.html) diff --git a/src/current/v1.0/show-users.md b/src/current/v1.0/show-users.md deleted file mode 100644 index c6b3cc40db1..00000000000 --- a/src/current/v1.0/show-users.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: SHOW USERS -summary: The SHOW USERS statement lists the users for all databases. -toc: true ---- - -The `SHOW USERS` [statement](sql-statements.html) lists the users for all databases. 
- - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_users.html %} - -## Required Privileges - -No [privileges](privileges.html) are required to list users. - -## Example - -~~~ sql -> SHOW USERS; -~~~ - -~~~ -+------------+ -| username | -+------------+ -| jpointsman | -| maxroach | -| root | -+------------+ -~~~ - -## See Also - -- [`CREATE USER`](create-user.html) -- [Create and Manage Users](create-and-manage-users.html) diff --git a/src/current/v1.0/show-vars.md b/src/current/v1.0/show-vars.md deleted file mode 100644 index 9b5050eafb6..00000000000 --- a/src/current/v1.0/show-vars.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: SHOW (session settings) -summary: The SHOW statement displays the current settings for the client session. -toc: true ---- - -The `SHOW` [statement](sql-statements.html) can display the value of either one or all of -the session setting variables. Some of these can also be configured via [`SET`](set-vars.html). - - -## Required Privileges - -No [privileges](privileges.html) are required to display the session settings. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/show_var.html %} - -{{site.data.alerts.callout_info}}The SHOW statement for session settings is unrelated to the other SHOW statements: SHOW CLUSTER SETTING, SHOW CREATE TABLE, SHOW CREATE VIEW, SHOW USERS, SHOW DATABASES, SHOW COLUMNS, SHOW GRANTS, and SHOW CONSTRAINTS.{{site.data.alerts.end}} - -## Parameters - -The `SHOW ` statement accepts a single parameter: the variable name. - -The variable name is case insensitive. -It may be enclosed in double quotes; this is useful if the variable name itself contains spaces. - -### Supported variables - -| Variable name | Description | Initial value | Can be modified with [`SET`](set-vars.html)? | -|---------------------------------|-------------------------------------------------|---------------|-----------------------------------------------| -| `database` | The default database for the current session. | Database in connection string, or empty if not specified. | Yes | -| `search_path` | A list of databases or namespaces that will be searched to resolve unqualified table or function names. For more details, see [Name Resolution](sql-name-resolution.html). | `{pg_catalog}` (for ORM compatibility). | Yes | -| `session_user` | The user connected for the current session. | User in connection string. | No | -| `time zone` | The default time zone for the current session | `UTC` | Yes | -| `default_transaction_isolation` | The default transaction isolation level for the current session. See [Transaction parameters](transactions.html#transaction-parameters) for more details. | Settings in connection string, or "`SERIALIZABLE`" if not specified. | Yes | -| `transaction isolation level` | The isolation level of the current transaction. See [Transaction parameters](transactions.html#transaction-parameters) for more details. | `SERIALIZABLE` | Yes | -| `transaction priority` | The priority of the current transaction. See [Transaction parameters](transactions.html#transaction-parameters) for more details. | `NORMAL` | Yes | -| `transaction status` | The state of the current transaction. See [Transactions](transactions.html) for more details. | `NoTxn` | No | -| `server_version` | The version of PostgreSQL that CockroachDB emulates. | Version-dependent. | No | -| `client_min_messages` | (Reserved; exposed only for ORM compatibility.) 
| (Reserved) | No | -| `client_encoding` | (Reserved; exposed only for ORM compatibility.) | (Reserved) | No | -| `extra_float_digits` | (Reserved; exposed only for ORM compatibility.) | (Reserved) | No | -| `max_index_keys` | (Reserved; exposed only for ORM compatibility.) | (Reserved) | No | -| `standard_conforming_strings` | (Reserved; exposed only for ORM compatibility.) | (Reserved) | No | - -Special syntax cases supported for compatibility: - -| Syntax | Equivalent to | -|--------|---------------| -| `SHOW TRANSACTION PRIORITY` | `SHOW "transaction priority"` | -| `SHOW TRANSACTION ISOLATION LEVEL` | `SHOW "transaction isolation level"` | -| `SHOW TIME ZONE` | `SHOW "time zone"` | -| `SHOW TRANSACTION STATUS` | `SHOW "transaction status"` | - -## Examples - -### Showing the Value of a Single Session Variable - -~~~ sql -> SHOW DATABASE; -~~~ - -~~~ -+----------+ -| database | -+----------+ -| test | -+----------+ -(1 row) -~~~ - -### Showing the Value of all Session Variables - -~~~ sql -> SHOW ALL; -~~~ - -~~~ -+-------------------------------+--------------+ -| Variable | Value | -+-------------------------------+--------------+ -| application_name | | -| client_encoding | UTF8 | -| client_min_messages | | -| database | | -| default_transaction_isolation | SERIALIZABLE | -| distsql | off | -| extra_float_digits | | -| max_index_keys | 32 | -| search_path | pg_catalog | -| server_version | 9.5.0 | -| session_user | root | -| standard_conforming_strings | on | -| time zone | UTC | -| transaction isolation level | SERIALIZABLE | -| transaction priority | NORMAL | -| transaction status | NoTxn | -+-------------------------------+--------------+ -(16 rows) -~~~ - -## See Also - -- [`SET` (session variable)](set-vars.html) -- [Transactions](transactions.html) and [Transaction parameters](transactions.html#transaction-parameters) -- [`SHOW CLUSTER SETTING`](show-cluster-setting.html) -- [`SHOW COLUMNS`](show-columns.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) -- [`SHOW CREATE TABLE`](show-create-table.html) -- [`SHOW CREATE VIEW`](show-create-view.html) -- [`SHOW DATABASES`](show-databases.html) -- [`SHOW GRANTS`](show-grants.html) -- [`SHOW INDEX`](show-index.html) -- [`SHOW USERS`](show-users.html) diff --git a/src/current/v1.0/simplified-deployment.md b/src/current/v1.0/simplified-deployment.md deleted file mode 100644 index 29b1210275e..00000000000 --- a/src/current/v1.0/simplified-deployment.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Simplified Deployment -summary: Deploying CockroachDB is simple and straightforward. -toc: false ---- - -Deploying and maintaining databases has forever been a difficult and expensive prospect. Simplicity is one of our foremost design goals. CockroachDB is self contained and eschews external dependencies. There are no explicit roles like primaries or secondaries to get in the way. Instead, every CockroachDB node is symmetric and equally important, meaning no single points of failure in the architecture. 
- -- No external dependencies -- Self-organizes using gossip network -- Dead-simple configuration without “knobs” -- Symmetric nodes are ideally suited to container-based deployments -- Every node provides access to centralized admin console - -CockroachDB is simple to deploy diff --git a/src/current/v1.0/sql-constants.md b/src/current/v1.0/sql-constants.md deleted file mode 100644 index 01fa69828bd..00000000000 --- a/src/current/v1.0/sql-constants.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -title: Constant Values -summary: SQL Constants represent a simple value that doesn't change. -toc: true ---- - -SQL Constants represent a simple value that doesn't change. - - -## Introduction - -There are five categories of constants in CockroachDB: - -- [String literals](#string-literals): these define string values but their actual data type will - be inferred from context, for example, `'hello'`. -- [Numeric literals](#numeric-literals): these define numeric values but their actual data - type will be inferred from context, for example, `-12.3`. -- [Byte array literals](#byte-array-literals): these define byte array values with data type - `BYTES`, for example, `b'hello'`. -- [Interpreted literals](#interpreted-literals): these define arbitrary values with an explicit - type, for example, `INTERVAL '3 days'`. -- [Named constants](#named-constants): these have predefined values with a predefined - type, for example, `TRUE` or `NULL`. - -## String literals - -CockroachDB supports two formats for string literals: - -- [Standard SQL string literals](#standard-sql-string-literals). -- [String literals with C escape sequences](#string-literals-with-character-escapes). - -These format also allow arbitrary Unicode characters encoded as UTF-8. - -In any case, the actual data type of a string literal is determined -using the context where it appears. - -For example: - -| Expression | Data type of the string literal | -|------------|---------------------------------| -| `length('hello')` | `STRING` | -| `now() + '3 day'` | `INTERVAL` | -| `INSERT INTO tb(date_col) VALUES ('2013-01-02')` | `DATE` | - -In general, the data type of a string literal is that demanded by the -context if there is no ambiguity, or `STRING` otherwise. - -Check our blog for -[more information about the typing of string literals](https://www.cockroachlabs.com/blog/revisiting-sql-typing-in-cockroachdb/). - -### Standard SQL string literals - -SQL string literals are formed by an arbitrary sequence of characters -enclosed between single quotes (`'`), for example, `'hello world'`. - -To include a single quote in the string, use a double single quote. -For example: - -~~~sql -> SELECT 'hello' as a, 'it''s a beautiful day' as b; -~~~ -~~~ -+-------+----------------------+ -| a | b | -+-------+----------------------+ -| hello | it's a beautiful day | -+-------+----------------------+ -~~~ - -For compatibility with the SQL standard, CockroachDB also recognizes -the following special syntax: two simple string literals separated by -a newline character are automatically concatenated together to form a -single constant. For example: - -~~~sql -> SELECT 'hello' -' world!' as a; -~~~ -~~~ -+--------------+ -| a | -+--------------+ -| hello world! | -+--------------+ -~~~ - -This special syntax only works if the two simple literals are -separated by a newline character. For example `'hello' ' world!'` -doesn't work. This is mandated by the SQL standard. 
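To concatenate string values on a single line, you can instead use the `||` concatenation operator; a brief sketch that produces the same result:

~~~sql
> SELECT 'hello' || ' world!' AS a;
~~~
~~~
+--------------+
|      a       |
+--------------+
| hello world! |
+--------------+
~~~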
- -### String literals with character escapes - -CockroachDB also supports string literals containing escape sequences -like in the programming language C. These are constructed by prefixing -the string literal with the letter `e`, for example, -`e'hello\nworld!'`. - -The following escape sequences are supported: - -Escape Sequence | Interpretation -----------------|--------------- -`\a` | ASCII code 7 (BEL) -`\b` | backspace (ASCII 8) -`\t` | tab (ASCII 9) -`\n` | newline (ASCII 10) -`\v` | vertical tab (ASCII 11) -`\f` | form feed (ASCII 12) -`\r` | carriage return (ASCII 13) -`\xHH` | hexadecimal byte value -`\ooo` | octal byte value -`\uXXXX` | 16-bit hexadecimal Unicode character value -`\UXXXXXXXX` | 32-bit hexadecimal Unicode character value - -For example, the `e'x61\141\u0061'` escape string represents the -hexadecimal byte, octal byte, and 16-bit hexadecimal Unicode character -values equivalent to the `'aaa'` string literal. - -## Numeric literals - -Numeric literals can have the following forms: - -~~~ -[+-]9999 -[+-]9999.[9999][e[+-]999] -[+-][9999].9999[e[+-]999] -[+-]9999e[+-]999 -[+-]0xAAAA -~~~ - -Some examples: - -~~~ -+4269 -3.1415 --.001 -6.626e-34 -50e6 -0xcafe111 -~~~ - -The actual data type of a numeric constant depends both on the context -where it is used, its literal format, and its numeric value. - -| Syntax | Possible data types | -|--------|---------------------| -| Contains a decimal separator | `FLOAT`, `DECIMAL` | -| Contains an exponent | `FLOAT`, `DECIMAL` | -| Contains a value outside of the range -2^63...(2^63)-1 | `FLOAT`, `DECIMAL` | -| Otherwise | `INT`, `DECIMAL`, `FLOAT` | - -Of the possible data types, which one is actually used is then further -refined depending on context. - -Check our blog for -[more information about the typing of numeric literals](https://www.cockroachlabs.com/blog/revisiting-sql-typing-in-cockroachdb/). - -## Byte array literals - -CockroachDB supports two formats for byte array literals: - -- [Byte array literals with C escape sequences](#byte-array-literals-with-character-escapes) -- [Hexadecimal-encoded byte array literals](#hexadecimal-encoded-byte-array-literals) - -### Byte array literals with character escapes - -This uses the same syntax as [string literals containing character escapes](#string-literals-with-character-escapes), -using a `b` prefix instead of `e`. Any character escapes are interpreted like they -would be for string literals. - -For example: `b'hello,\x32world'` - -The two differences between byte array literals and string literals -with character escapes are as follows: - -- Byte array literals always have data type `BYTES`, whereas the data - type of a string literal depends on context. -- Byte array literals may contain invalid UTF-8 byte sequences, - whereas string literals must always contain valid UTF-8 sequences. - -### Hexadecimal-encoded byte array literals - -This is a CockroachDB-specific extension to express byte array -literals: the delimiter `x'` followed by an arbitrary sequence of -hexadecimal digits, followed by a closing `'`. - -For example, both `x'636174'` and `X'636174'` are equivalent to `b'cat'`. - -This feature is inspired from MySQL. - -## Interpreted literals - -A constant of any data type can be formed using either of the following formats: - -~~~ -type 'string' -'string':::type -~~~ - -The value of the string part is used as input for the conversion function to the -specified data type, and the result is used as a constant with that data type. 
- -Examples: - -~~~ -DATE '2013-12-23' -BOOL 'FALSE' -'42.69':::INT -'TRUE':::BOOL -'3 days':::INTERVAL -~~~ - -Additionally, for compatibility with PostgreSQL, the notation -`'string'::type` and `CAST('string' AS type)` is also recognized as an -interpreted literal. These are special cases of -[cast expressions](sql-expressions.html). - -For more information about the allowable format of interpreted -literals, refer to the "Syntax" section of the respective data types: -[`DATE`](date.html#syntax), [`INTERVAL`](interval.html#syntax), -[`TIMESTAMP`/`TIMESTAMPTZ`](timestamp.html#syntax). - -## Named constants - -CockroachDB recognizes the following SQL named constants: - -- `TRUE` and `FALSE`, the two possible values of data type `BOOL`. -- `NULL`, the special SQL symbol that indicates "no value present". - -Note that `NULL` is a valid constant for any type: its actual data -type during expression evaluation is determined based on context. - -## See Also - -- [Value Expressions](sql-expressions.html) -- [Data Types](data-types.html) diff --git a/src/current/v1.0/sql-dump.md b/src/current/v1.0/sql-dump.md deleted file mode 100644 index ae079de810b..00000000000 --- a/src/current/v1.0/sql-dump.md +++ /dev/null @@ -1,349 +0,0 @@ ---- -title: SQL Dump (Export) -summary: Learn how to dump schemas and data from a CockroachDB cluster. -toc: true ---- - -The `cockroach dump` [command](cockroach-commands.html) outputs the SQL statements required to recreate one or more tables and all their rows (also known as a *dump*). This command can be used to back up or export each database in a cluster. The output should also be suitable for importing into other relational databases, with minimal adjustments. - -{{site.data.alerts.callout_success}}CockroachDB enterprise license users can also back up their cluster's data using BACKUP.{{site.data.alerts.end}} - -When `cockroach dump` is executed: - -- Table schemas and data are dumped as they appeared at the time that the command is started. Any changes after the command starts will not be included in the dump. -- If the dump takes longer than the [`ttlseconds`](configure-replication-zones.html) replication setting for the table (24 hours by default), the dump may fail. -- Reads, writes, and schema changes can happen while the dump is in progress, but will not affect the output of the dump. - -{{site.data.alerts.callout_info}}The user must have the SELECT privilege on the target table(s).{{site.data.alerts.end}} - - -## Synopsis - -~~~ shell -# Dump the schemas and data of specific tables to stdout: -$ cockroach dump - -# Dump just the data of specific tables to stdout: -$ cockroach dump
    --dump-mode=data - -# Dump just the schemas of specific tables to stdout: -$ cockroach dump
    --dump-mode=schema - -# Dump the schemas and data of all tables in a database to stdout: -$ cockroach dump - -# Dump just the schemas of all tables in a database to stdout: -$ cockroach dump --dump-mode=schema - -# Dump just the data of all tables in a database to stdout: -$ cockroach dump --dump-mode=data - -# Dump to a file: -$ cockroach dump
    > dump-file.sql - -# View help: -$ cockroach dump --help -~~~ - -## Flags - -The `dump` command supports the following [general-use](#general) and [logging](#logging) flags. - -### General - -Flag | Description ------|------------ -`--as-of` | Dump table schemas and/or data as they appear at the specified [timestamp](timestamp.html). See this [example](#dump-table-data-as-of-a-specific-time) for a demonstration.

    Note that historical data is available only within the garbage collection window, which is determined by the [`ttlseconds`](configure-replication-zones.html) replication setting for the table (24 hours by default). If this timestamp is earlier than that window, the dump will fail.

    **Default:** Current time -`--certs-dir` | The path to the [certificate directory](create-security-certificates.html). The directory must contain valid certificates if running in secure mode.

    **Env Variable:** `COCKROACH_CERTS_DIR`
    **Default:** `${HOME}/.cockroach-certs/` -`--dump-mode` | Whether to dump table schemas, table data, or both.

    To dump just table schemas, set this to `schema`. To dump just table data, set this to `data`. To dump both table schemas and data, leave this flag out or set it to `both`.

    Table and view schemas are ordered alphabetically by name. This is not always an ordering in which the tables and views can be successfully recreated. Also, the schemas of views are dumped incorrectly as `CREATE TABLE` statements, and attempting to dump the data of a view results in an error. For more details and workarounds, see the corresponding [known limitations](known-limitations.html#order-of-dumped-schemas-and-incorrect-schemas-of-dumped-views). Note that these limitations have been resolved in v1.1.

    **Default:** `both` -`--host` | The server host to connect to. This can be the address of any node in the cluster.

    **Env Variable:** `COCKROACH_HOST`
    **Default:** `localhost` -`--insecure` | Run in insecure mode. If this flag is not set, the `--certs-dir` flag must point to valid certificates.

    **Env Variable:** `COCKROACH_INSECURE`
    **Default:** `false` -`--port`
    `-p` | The server port to connect to.

    **Env Variable:** `COCKROACH_PORT`
    **Default:** `26257` -`--url` | The connection URL. If you use this flag, do not set any other connection flags.

    For insecure connections, the URL format is:
    `--url=postgresql://<user>@<host>:<port>/<database>?sslmode=disable`

    For secure connections, the URL format is:
    `--url=postgresql://<user>@<host>:<port>/<database>`
    with the following parameters in the query string:
    `sslcert=<path-to-client-crt>`
    `sslkey=<path-to-client-key>`
    `sslmode=verify-full`
    `sslrootcert=<path-to-ca-crt>`

    **Env Variable:** `COCKROACH_URL` -`--user`
    `-u` | The [user](create-and-manage-users.html) executing the `dump` command. The user must have the `SELECT` privilege on the target table.

    **Default:** `root` - -### Logging - -By default, the `dump` command logs errors to `stderr`. - -If you need to troubleshoot this command's behavior, you can change its [logging behavior](debug-and-error-logs.html). - -## Examples - -{{site.data.alerts.callout_info}}These examples use our sample startrek database, which you can add to a cluster via the cockroach gen command. Also, the examples assume that the maxroach user has been granted the SELECT privilege on all target tables. {{site.data.alerts.end}} - -### Dump a table's schema and data - -~~~ shell -$ cockroach dump startrek episodes --insecure --user=maxroach > backup.sql -~~~ - -~~~ shell -$ cat backup.sql -~~~ - -~~~ -CREATE TABLE episodes ( - id INT NOT NULL, - season INT NULL, - num INT NULL, - title STRING NULL, - stardate DECIMAL NULL, - CONSTRAINT "primary" PRIMARY KEY (id), - FAMILY "primary" (id, season, num), - FAMILY fam_1_title (title), - FAMILY fam_2_stardate (stardate) -); - -INSERT INTO episodes (id, season, num, title, stardate) VALUES - (1, 1, 1, 'The Man Trap', 1531.1), - (2, 1, 2, 'Charlie X', 1533.6), - (3, 1, 3, 'Where No Man Has Gone Before', 1312.4), - (4, 1, 4, 'The Naked Time', 1704.2), - (5, 1, 5, 'The Enemy Within', 1672.1), - (6, 1, 6, e'Mudd\'s Women', 1329.8), - (7, 1, 7, 'What Are Little Girls Made Of?', 2712.4), - (8, 1, 8, 'Miri', 2713.5), - (9, 1, 9, 'Dagger of the Mind', 2715.1), - (10, 1, 10, 'The Corbomite Maneuver', 1512.2), - ... -~~~ - -### Dump just a table's schema - -~~~ shell -$ cockroach dump startrek episodes --insecure --user=maxroach --dump-mode=schema > backup.sql -~~~ - -~~~ shell -$ cat backup.sql -~~~ - -~~~ -CREATE TABLE episodes ( - id INT NOT NULL, - season INT NULL, - num INT NULL, - title STRING NULL, - stardate DECIMAL NULL, - CONSTRAINT "primary" PRIMARY KEY (id), - FAMILY "primary" (id, season, num), - FAMILY fam_1_title (title), - FAMILY fam_2_stardate (stardate) -); -~~~ - -### Dump just a table's data - -~~~ shell -$ cockroach dump startrek episodes --insecure --user=maxroach --dump-mode=data > backup.sql -~~~ - -~~~ shell -$ cat backup.sql -~~~ - -~~~ -INSERT INTO episodes (id, season, num, title, stardate) VALUES - (1, 1, 1, 'The Man Trap', 1531.1), - (2, 1, 2, 'Charlie X', 1533.6), - (3, 1, 3, 'Where No Man Has Gone Before', 1312.4), - (4, 1, 4, 'The Naked Time', 1704.2), - (5, 1, 5, 'The Enemy Within', 1672.1), - (6, 1, 6, e'Mudd\'s Women', 1329.8), - (7, 1, 7, 'What Are Little Girls Made Of?', 2712.4), - (8, 1, 8, 'Miri', 2713.5), - (9, 1, 9, 'Dagger of the Mind', 2715.1), - (10, 1, 10, 'The Corbomite Maneuver', 1512.2), - ... -~~~ - -### Dump all tables in a database - -{{site.data.alerts.callout_info}}Table and view schemas are ordered alphabetically by name. This is not always an ordering in which the tables and views can be successfully recreated. Also, the schemas of views are dumped incorrectly as CREATE TABLE statements, and attempting to dump the data of a view results in an error. For more details and workarounds, see the corresponding known limitations. 
Note that these limitations have been resolved in v1.1.{{site.data.alerts.end}} - -~~~ shell -$ cockroach dump startrek --insecure --user=maxroach > backup.sql -~~~ - -~~~ shell -$ cat backup.sql -~~~ - -~~~ -CREATE TABLE episodes ( - id INT NOT NULL, - season INT NULL, - num INT NULL, - title STRING NULL, - stardate DECIMAL NULL, - CONSTRAINT "primary" PRIMARY KEY (id), - FAMILY "primary" (id, season, num), - FAMILY fam_1_title (title), - FAMILY fam_2_stardate (stardate) -); - -CREATE TABLE quotes ( - quote STRING NULL, - characters STRING NULL, - stardate DECIMAL NULL, - episode INT NULL, - INDEX quotes_episode_idx (episode), - FAMILY "primary" (quote, rowid), - FAMILY fam_1_characters (characters), - FAMILY fam_2_stardate (stardate), - FAMILY fam_3_episode (episode) -); - -INSERT INTO episodes (id, season, num, title, stardate) VALUES - (1, 1, 1, 'The Man Trap', 1531.1), - (2, 1, 2, 'Charlie X', 1533.6), - (3, 1, 3, 'Where No Man Has Gone Before', 1312.4), - (4, 1, 4, 'The Naked Time', 1704.2), - (5, 1, 5, 'The Enemy Within', 1672.1), - (6, 1, 6, e'Mudd\'s Women', 1329.8), - (7, 1, 7, 'What Are Little Girls Made Of?', 2712.4), - (8, 1, 8, 'Miri', 2713.5), - (9, 1, 9, 'Dagger of the Mind', 2715.1), - (10, 1, 10, 'The Corbomite Maneuver', 1512.2), - ... - -INSERT INTO quotes (quote, characters, stardate, episode) VALUES - ('"... freedom ... is a worship word..." "It is our worship word too."', 'Cloud William and Kirk', NULL, 52), - ('"Beauty is transitory." "Beauty survives."', 'Spock and Kirk', NULL, 72), - ('"Can you imagine how life could be improved if we could do away with jealousy, greed, hate ..." "It can also be improved by eliminating love, tenderness, sentiment -- the other side of the coin"', 'Dr. Roger Corby and Kirk', 2712.4, 7), - ... -~~~ - -### Dump fails (user does not have `SELECT` privilege) - -In this example, the `dump` command fails for a user that does not have the `SELECT` privilege on the `episodes` table. - -~~~ shell -$ cockroach dump startrek episodes --insecure --user=leslieroach > backup.sql -~~~ - -~~~ shell -Error: pq: user leslieroach has no privileges on table episodes -Failed running "dump" -~~~ - -### Restore a table from a backup file - -In this example, a user that has the `CREATE` privilege on the `startrek` database uses the [`cockroach sql`](use-the-built-in-sql-client.html) command to recreate a table, based on a file created by the `dump` command. - -~~~ shell -$ cat backup.sql -~~~ - -~~~ -CREATE TABLE quotes ( - quote STRING NULL, - characters STRING NULL, - stardate DECIMAL NULL, - episode INT NULL, - INDEX quotes_episode_idx (episode), - FAMILY "primary" (quote, rowid), - FAMILY fam_1_characters (characters), - FAMILY fam_2_stardate (stardate), - FAMILY fam_3_episode (episode) -); - -INSERT INTO quotes (quote, characters, stardate, episode) VALUES - ('"... freedom ... is a worship word..." "It is our worship word too."', 'Cloud William and Kirk', NULL, 52), - ('"Beauty is transitory." "Beauty survives."', 'Spock and Kirk', NULL, 72), - ('"Can you imagine how life could be improved if we could do away with jealousy, greed, hate ..." "It can also be improved by eliminating love, tenderness, sentiment -- the other side of the coin"', 'Dr. Roger Corby and Kirk', 2712.4, 7), - ... 
-~~~ - -~~~ shell -$ cockroach sql --insecure --database=startrek --user=maxroach < backup.sql -~~~ - -~~~ shell -CREATE TABLE -INSERT 100 -INSERT 100 -~~~ - -### Dump table data as of a specific time - -In this example, we assume there were several inserts into a table both before and after `2017-03-07 19:55:00`. - -First, let's use the built-in SQL client to view the table at the current time: - -~~~ shell -$ cockroach sql --insecure --execute="SELECT * FROM db1.dump_test" -~~~ - -~~~ -+--------------------+------+ -| id | name | -+--------------------+------+ -| 225594758537183233 | a | -| 225594758537248769 | b | -| 225594758537281537 | c | -| 225594758537314305 | d | -| 225594758537347073 | e | -| 225594758537379841 | f | -| 225594758537412609 | g | -| 225594758537445377 | h | -| 225594991654174721 | i | -| 225594991654240257 | j | -| 225594991654273025 | k | -| 225594991654305793 | l | -| 225594991654338561 | m | -| 225594991654371329 | n | -| 225594991654404097 | o | -| 225594991654436865 | p | -+--------------------+------+ -(16 rows) -~~~ - -Next, let's use a [time-travel query](select.html#select-historical-data-time-travel) to view the contents of the table as of `2017-03-07 19:55:00`: - -~~~ shell -$ cockroach sql --insecure --execute="SELECT * FROM db1.dump_test AS OF SYSTEM TIME '2017-03-07 19:55:00'" -~~~ - -~~~ -+--------------------+------+ -| id | name | -+--------------------+------+ -| 225594758537183233 | a | -| 225594758537248769 | b | -| 225594758537281537 | c | -| 225594758537314305 | d | -| 225594758537347073 | e | -| 225594758537379841 | f | -| 225594758537412609 | g | -| 225594758537445377 | h | -+--------------------+------+ -(8 rows) -~~~ - -Finally, let's use `cockroach dump` with the `--as-of` flag set to dump the contents of the table as of `2017-03-07 19:55:00`. - -~~~ shell -$ cockroach dump db1 dump_test --insecure --dump-mode=data --as-of='2017-03-07 19:55:00' -~~~ - -~~~ -INSERT INTO dump_test (id, name) VALUES - (225594758537183233, 'a'), - (225594758537248769, 'b'), - (225594758537281537, 'c'), - (225594758537314305, 'd'), - (225594758537347073, 'e'), - (225594758537379841, 'f'), - (225594758537412609, 'g'), - (225594758537445377, 'h'); -~~~ - -As you can see, the results of the dump are identical to the earlier time-travel query. - -## See Also - -- [Import Data](import-data.html) -- [Use the Built-in SQL Client](use-the-built-in-sql-client.html) -- [Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/sql-expressions.md b/src/current/v1.0/sql-expressions.md deleted file mode 100644 index 874453d30d4..00000000000 --- a/src/current/v1.0/sql-expressions.md +++ /dev/null @@ -1,716 +0,0 @@ ---- -title: Value Expressions -summary: Value expressions allow the computation of new values from basic parts. -toc: true ---- - -Most SQL statements can contain *value expressions* that compute new -values from data. For example, in the query `SELECT ceil(price) FROM -items`, the expression `ceil(price)` computes the rounded-up value of -the values from the `price` column. - -Value expressions produce values suitable to store in a single table -cell (one column of one row). They can be contrasted with -[table expressions](table-expressions.html), which produce results -structured as a table. - -The following sections provide details on each of these options. - - -## Constants - -Constant expressions represent a simple value that doesn't change. -They are described further in section [SQL Constants](sql-constants.html). 
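For instance, constants can be combined freely inside a value expression; a small sketch (the column labels are arbitrary):

~~~sql
> SELECT 1 + 2 AS three, 'hello' AS greeting, TRUE AS flag;
~~~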
- -## Column References - -An expression in a query can refer to columns in the current data source in two ways: - -- Using the name of the column, e.g., `price` in `SELECT price FROM - items`. - - - If the name of a column is also a - [SQL keyword](keywords-and-identifiers.html#keywords), the name - must be appropriately quoted. For example: `SELECT "Default" FROM - configuration`. - - - If the name is ambiguous (e.g., when joining across multiple - tables), it is possible to disambiguate by prefixing the column - name by the table name. For example, `SELECT items.price FROM - items`. - -- Using the ordinal position of the column. For example, `SELECT @1 - FROM items` selects the first column in `items`. - - *This is a CockroachDB SQL extension.* - - {{site.data.alerts.callout_danger}} - Ordinal references should be used with care in production - code! During schema updates, column ordinal positions can change and - invalidate existing queries that use ordinal positions based on a - previous version of the schema. - {{site.data.alerts.end}} - -## Unary and Binary Operations - -An expression prefixed by a unary operator, or two expressions -separated by a binary operator, form a new expression. - -CockroachDB supports the following operators: - -| Operator | Description | -|----------|-------------| -| `-` (unary) | numeric negation | -| `+` (unary) | no-op, exists only for symmetry with unary `-` | -| `~` (unary) | 64-bit binary complement | -| `NOT` (unary) | boolean/logical negation | -| `+` | addition | -| `-` | substraction | -| `*` | multiplication | -| `/` | numeric division | -| `//` | division with rounding ("integer division") | -| `%` | rest of division ("modulo") | -| `&` | bitwise AND | -| `|` | bitwise OR | -| `^`, `#` | bitwise XOR | -| `<<` | binary shift left | -| `>>` | binary shift right | -| `~` `!~`, `~*`, `!~*` | match using regular expression | -| `||` | concatenation for strings or byte arrays | -| `<`, `>`, `<=`, `>=`, `<>`, `!=`, `IS` | comparison | -| `LIKE`, `ILIKE`, `SIMILAR TO` | match using string pattern | -| `IN` | test for value in set | - -See also [this section over which data types are valid operands -for each operator](functions-and-operators.html#operators). - -### Value Comparisons - -The standard operators `<` (smaller than), `>` (greater than), `<=` -(lower than or equal to), `>=` (greater than or equal to), `=` -(equals), `<>` and `!=` (not equal to), `IS` (identical to), and `IS -NOT` (not identical to) can be applied to any pair of values from a -single data type, as well as some pairs of values from different data -types. - -See also [this section over which data types are valid operands -for each operator](functions-and-operators.html#operators). - -The following special rules apply: - -- `NULL` is always ordered smaller than every other value, even itself. -- `NULL` is never equal to anything via `=`, even `NULL`. To check - whether a value is `NULL`, use the `IS` operator or the conditional - expression `IFNULL(..)`. - -#### Typing rule - -All comparisons accept any combination of argument types and result in type `BOOL`. - -### Set Membership - -Syntax: - -~~~ - IN - IN ( ... subquery ... ) - - NOT IN - NOT IN ( ... subquery ... ) -~~~ - -Returns `TRUE` if and only if the value of the left operand is part of -the result of evaluating the right operand. 
- -For example: - -~~~sql -> SELECT a IN (1, 2, 3) FROM sometable; -> SELECT a IN (SELECT * FROM allowedvalues) FROM sometable; -> SELECT ('x', 123) IN (SELECT * FROM rows); -~~~ - -#### Typing rule - -`IN` requires its right operand to be a homogeneous tuple type and its left operand -to match the tuple element type. The result has type `BOOL`. - -### String Pattern Matching - -Syntax: - -~~~ - LIKE - ILIKE - NOT LIKE - NOT ILIKE -~~~ - -Evaluates both expressions as strings, then tests whether the string on the left -matches the pattern given on the right. Returns `TRUE` if a match is found -or `FALSE` otherwise, or the inverted value for the `NOT` variants. - -Patterns can contain `_` to match any single -character, or `%` to match any sequence of zero or more characters. -`ILIKE` causes the match to be tested case-insensitively. - -For example: - -~~~sql -> SELECT 'monday' LIKE '%day' AS a, 'tuesday' LIKE 'tue_day' AS b, 'wednesday' ILIKE 'W%' AS c; -~~~ -~~~ -+------+------+------+ -| a | b | c | -+------+------+------+ -| true | true | true | -+------+------+------+ -~~~ - -#### Typing rule - -The operands must be either both `STRING` or both `BYTES`. The result has type `BOOL`. - -### String Matching Using POSIX Regular Expressions - -Syntax: - -~~~ - ~ - ~* - !~ - !~* -~~~ - -Evaluates both expressions as strings, then tests whether the string on the left -matches the pattern given on the right. Returns `TRUE` if a match is found -or `FALSE` otherwise, or the inverted value for the `!` variants. - -The pattern is expressed using -[POSIX regular expression syntax](https://en.wikipedia.org/wiki/Regular_expression). Unlike -`LIKE` patterns, a regular expression is allowed to match anywhere -inside a string, not only at the beginning. - -For example: - -~~~sql -> SELECT 'monday' ~ 'onday' AS a, 'tuEsday' ~ 't[uU][eE]sday' AS b, 'wednesday' ~* 'W.*y' AS c; -~~~ -~~~ -+------+------+------+ -| a | b | c | -+------+------+------+ -| true | true | true | -+------+------+------+ -~~~ - -#### Typing rule - -The operands must be either both `STRING` or both `BYTES`. The result has type `BOOL`. - -### String Matching Using SQL Regular Expressions - -Syntax: - -~~~ - SIMILAR TO - NOT SIMILAR TO -~~~ - -Evaluates both expressions as strings, then tests whether the string on the left -matches the pattern given on the right. Returns `TRUE` if a match is found -or `FALSE` otherwise, or the inverted value for the `NOT` variant. - -The pattern is expressed using the SQL standard's definition of a regular expression. -This is a mix of SQL `LIKE` patterns and POSIX regular expressions: - -- `_` and `%` denote any character or any string, respectively. -- `.` matches specifically the period character, unlike in POSIX where it is a wildcard. -- Most of the other POSIX syntax applies as usual. -- The pattern matches the entire string (as in `LIKE`, unlike POSIX regular expressions). - -For example: - -~~~sql -> SELECT 'monday' SIMILAR TO '_onday' AS a, 'tuEsday' SIMILAR TO 't[uU][eE]sday' AS b, 'wednesday' SIMILAR TO 'w%y' AS c; -~~~ -~~~ -+------+------+------+ -| a | b | c | -+------+------+------+ -| true | true | true | -+------+------+------+ -~~~ - -#### Typing rule - -The operands must be either both `STRING` or both `BYTES`. The result has type `BOOL`. 
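The anchoring difference between POSIX matching and SQL regular expressions can be seen side by side; in this sketch the first and third expressions return `true`, while the second returns `false` because `SIMILAR TO` must match the entire string:

~~~sql
> SELECT 'wednesday' ~ 'nes' AS posix_substring,
         'wednesday' SIMILAR TO 'nes' AS sql_whole_string,
         'wednesday' SIMILAR TO '%nes%' AS sql_with_wildcards;
~~~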
- -### Operator Precedence - -CockroachDB uses the following grouping precedence of -operators in expressions: - -| Level | Operators | -|-------|-----------| -| 1 | `~` (unary) | -| 2 | `-` (unary) | -| 3 | `*`, `/`, `//`, `%` | -| 4 | `+`, `-` (binary) | -| 5 | `<<`, `>>` | -| 6 | `&` | -| 7 | `^`, `#` | -| 8 | `|` | -| 9 | `||` | -| 10 | `IN`, `LIKE`, `ILIKE`, `SIMILAR TO`, `!~`, `!~*` `~*`, `~` (binary) | -| 11 | `<` `>` `=` `<=` `>=` `<>` `!=` | -| 12 | `IS` | -| 13 | `NOT` | - -## Function Calls and SQL Special Forms - -General syntax: - -~~~ - ( ) -~~~ - -A built-in function name followed by an opening parenthesis, followed -by a comma-separated list of expressions, followed by a closing -parenthesis. - -This applies the named function to the arguments between -parentheses. When the function's namespace is not prefixed, the -[name resolution rules](sql-name-resolution.html) determine which -function is called. - -See also [the separate section on supported built-in functions](functions-and-operators.html). - -In addition, the following SQL special forms are also supported: - -| Special form | Equivalent to | -|------------------------------------------------------------|---------------| -| `EXTRACT( FROM )` | `extract("", )` | -| `EXTRACT_DURATION( FROM )` | `extract_duration("", )` | -| `OVERLAY( PLACING FROM FOR )` | `overlay(, , , )` | -| `OVERLAY( PLACING FROM )` | `overlay(, , )` | -| `POSITION( IN )` | `strpos(, )` | -| `SUBSTRING( FROM FOR )` | `substring(, , )` | -| `SUBSTRING( FOR FROM )` | `substring(, , )` | -| `SUBSTRING( FOR )` | `substring(, 1, )` | -| `SUBSTRING( FROM )` | `substring(, )` | -| `TRIM( FROM )` | `btrim(, )` | -| `TRIM(FROM )` | `btrim()` | -| `TRIM(, )` | `btrim(, )` | -| `TRIM(LEADING FROM )` | `ltrim(, )` | -| `TRIM(LEADING FROM )` | `ltrim()` | -| `TRIM(TRAILING FROM )` | `rtrim(, )` | -| `TRIM(TRAILING FROM )` | `rtrim()` | -| `CURRENT_DATE` | `current_date()` | -| `CURRENT_TIMESTAMP` | `current_timestamp()` | - -#### Typing rule - -In general, a function call requires the arguments to be of the types -accepted by the function, and returns a value of the type determined -by the function. - -However, the typing of function calls is complicated by the fact -SQL supports function overloading. [See our blog post for more details](https://www.cockroachlabs.com/blog/revisiting-sql-typing-in-cockroachdb/). - -## Subscripted Expressions - -It is possible to access one item in an array value using the `[` ... `]` operator. - -For example, if the name `a` refers to an array of 10 -values, `a[3]` will retrieve the 3rd value. The first value has index -1. - -If the index is smaller or equal to 0, or larger than the size of the array, then -the result of the subscripted expression is `NULL`. - -#### Typing rule - -The subscripted expression must have an array type; the index expression -must have type `INT`. The result has the element type of the -subscripted expression. - -## Conditional Expressions - -Expressions can test a conditional expression and, depending on whether -or which condition is satisfied, evaluate to one or more additional -operands. - -These expression formats share the following property: some of their -operands are only evaluated if a condition is true. This matters -especially when an operand would be invalid otherwise. For example, -`IF(a=0, 0, x/a)` returns 0 if `a` is 0, and `x/a` otherwise. - -### `IF` Expressions - -Syntax: - -~~~ -IF ( , , ) -~~~ - -Evaluates ``, then evaluates `` if the condition is true, -or `` otherwise. 
- -The expression corresponding to the case when the condition is false -is not evaluated. - -#### Typing rule - -The condition must have type `BOOL`, and the two remaining expressions -must have the same type. The result has the same type as the -expression that was evaluated. - -### Simple `CASE` Expressions - -Syntax: - -~~~ -CASE - WHEN THEN - [ WHEN THEN ] ... - [ ELSE ] -END -~~~ - -Evaluates ``, then picks the `WHEN` branch where `` is -equal to ``, then evaluates and returns the corresponding `THEN` -expression. If no `WHEN` branch matches, the `ELSE` expression is -evaluated and returned, if any. Otherwise, `NULL` is returned. - -Conditions and result expressions after the first match are not evaluated. - -#### Typing rule - -The condition and the `WHEN` expressions must have the same type. -The `THEN` expressions and the `ELSE` expression, if any, must have the same type. -The result has the same type as the `THEN`/`ELSE` expressions. - -### Searched `CASE` Expressions - -Syntax: - -~~~ -CASE WHEN THEN - [ WHEN THEN ] ... - [ ELSE ] -END -~~~ - -In order, evaluates each `` expression; at the first `` -expression that evaluates to `TRUE`, returns the result of evaluating the -corresponding `THEN` expression. If none of the `` expressions -evaluates to true, then evaluates and returns the value of the `ELSE` -expression, if any, or `NULL` otherwise. - -Conditions and result expressions after the first match are not evaluated. - -#### Typing rule - -All the `WHEN` expressions must have type `BOOL`. -The `THEN` expressions and the `ELSE` expression, if any, must have the same type. -The result has the same type as the `THEN`/`ELSE` expressions. - -### `NULLIF` Expressions - -Syntax: - -~~~ -NULLIF ( , ) -~~~ - -Equivalent to: `IF ( = , NULL, )` - -#### Typing rule - -Both operands must have the same type, which is also the type of the result. - -### `COALESCE` and `IFNULL` Expressions - -Syntax: - -~~~ -IFNULL ( , ) -COALESCE ( [, [, ] ...] ) -~~~ - -`COALESCE` evaluates the first expression first. If its value is not -`NULL`, its value is returned directly. Otherwise, it returns the -result of applying `COALESCE` on the remaining expressions. If all the -expressions are `NULL`, `NULL` is returned. - -Arguments to the right of the first non-null argument are not evaluated. - -`IFNULL(a, b)` is equivalent to `COALESCE(a, b)`. - -#### Typing rule - -The operands must have the same type, which is also the type of the result. - -## Logical operators - -The Boolean operators `AND`, `OR` and `NOT` are available. - -Syntax: - -~~~ -NOT - AND - OR -~~~ - -`AND` and `OR` are commutative. Moreover, the input to `AND` -and `OR` is not evaluated in any particular order. Some operand may -not even be evaluated at all if the result can be fully ascertained using -only the other operand. - -{{site.data.alerts.callout_info}}This is different from the left-to-right "short-circuit logic" found in other programming languages. When it is essential to force evaluation order, use a conditional expression.{{site.data.alerts.end}} - -### Typing rule - -The operands must have type `BOOL`. The result has type `BOOL`. 
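As noted above, a conditional expression is the way to force an evaluation order; a minimal sketch, assuming a hypothetical table `t` with numeric columns `a` and `x`:

~~~sql
-- Not guaranteed to be safe: AND may evaluate x / a even when a = 0,
-- which would fail with a division-by-zero error.
> SELECT a != 0 AND x / a > 1 FROM t;

-- Safe: IF evaluates x / a only when the condition a = 0 is false.
> SELECT IF(a = 0, FALSE, x / a > 1) FROM t;
~~~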
- -## Aggregate Expressions - -An aggregate expression has the same syntax as a function call, with a special -case for `COUNT`: - -~~~ - ( ) -COUNT ( * ) -~~~ - -The difference between aggregate expressions and function calls is -that the former use -[aggregate functions](functions-and-operators.html#aggregate-functions) -and can only appear in the list of rendered expressions in a -[`SELECT` clause](select.html). - -An aggregate expression computes a combined value, depending on -which aggregate function is used, across all the rows currently -selected. - -#### Typing rule - -[The operand and return types are determined like for regular function calls](#function-calls-and-sql-special-forms). - -## Window Function Calls - -A window function call has the syntax of a function call followed by an `OVER` clause: - -~~~ - ( ) OVER - ( * ) OVER -~~~ - -It represents the application of a window or aggregate function over a -subset ("window") of the rows selected by a query. - -#### Typing rule - -[The operand and return types are determined like for regular function calls](#function-calls-and-sql-special-forms). - -## Explicit Type Coercions - -Syntax: - -~~~ - :: -CAST ( AS ) -~~~ - -Evaluates the expression and converts the resulting value to the -specified type. An error is reported if the conversion is invalid. - -For example: `CAST(now() AS DATE)` - -Note that in many cases a type annotation is preferrable to a type -coercion. See the section on -[type annotations](#explicitly-typed-expressions) below for more -details. - -#### Typing rule - -The operand can have any type. -The result has the type specified in the `CAST` expression. - -As a special case, if the operand is a literal, a constant expression -or a placeholder, the `CAST` type is used to guide the typing of the -operand. [See our blog post for more details](https://www.cockroachlabs.com/blog/revisiting-sql-typing-in-cockroachdb/). - -## Collation Expressions - -Syntax: - -~~~ - COLLATE -~~~ - -Evaluates the expression and converts its result to a collated string -with the specified collation. - -For example: `'a' COLLATE de` - -#### Typing rule - -The operand must have type `STRING`. The result has type `COLLATEDSTRING`. - -## Existence Test on the Result of Subqueries - -Syntax: - -~~~ -EXISTS ( ... subquery ... ) -NOT EXISTS ( ... subquery ... ) -~~~ - -Evaluates the subquery and then returns `TRUE` or `FALSE` depending on -whether the subquery returned any row (for `EXISTS`) or didn't return -any row (for `NOT EXISTS`). - -#### Typing rule - -The operand can have any table type. The result has type `BOOL`. - -## Scalar Subqueries - -Syntax: - -~~~ -( ... subquery ... ) -~~~ - -Evaluates the subquery, asserts that it returns a single row and single column, -and then evaluates to the value of that single cell. - -For example: - -~~~sql -> SELECT (SELECT COUNT(*) FROM users) > (SELECT COUNT(*) FROM admins); -~~~ - -returns `TRUE` if there are more rows in table `users` than in table -`admins`. - -#### Typing rule - -The operand must have a table type with only one column. -The result has the type of that single column. - -## Array Constructors - -Syntax: - -~~~ -ARRAY[ , , ... ] -~~~ - -Evaluates to an array containing the specified values. - -For example: - -~~~sql -> SELECT ARRAY[1,2,3] AS a; -~~~ -~~~ -+---------+ -| a | -+---------+ -| {1,2,3} | -+---------+ -~~~ - -The data type of the array is inferred from the values of the provided -expressions. All the positions in the array must have the same data type. 
- -If there are no expressions specified (empty array), or -all the values are `NULL`, then the type of the array must be -specified explicitly using a type annotation. For example: - -~~~sql -> SELECT ARRAY[]:::int[]; -~~~ - -#### Typing rule - -The operands must all have the same type. -The result has the array type with the operand type as element type. - -## Tuple Constructor - -Syntax: - -~~~ -(, , ...) -ROW (, , ...) -~~~ - -Evaluates to a tuple containing the values of the provided expressions. - -For example: - -~~~sql -> SELECT ('x', 123, 12.3) AS a; -~~~ -~~~ -+----------------+ -| a | -+----------------+ -| ('x',123,12.3) | -+----------------+ -~~~ - -The data type of the resulting tuple is inferred from the values. -Each position in a tuple can have a distinct data type. - -#### Typing rule - -The operands can have any type. -The result has a tuple type whose item types are the types of the operands. - -## Explicitly Typed Expressions - -Syntax: - -~~~ -::: -ANNOTATE_TYPE(, ) -~~~ - -Evaluates to the given expression, requiring the expression to have -the given type. If the expression doesn't have the given type, an -error is returned. - -Type annotations are specially useful to guide the arithmetic on -numeric values. For example: - -~~~sql -> SELECT (1 / 0):::FLOAT; --> +Inf -> SELECT (1 / 0); --> error "division by zero" -> SELECT (1 / 0)::FLOAT; --> error "division by zero" -~~~ - -Type annotations are also different from cast expressions (see above) in -that they do not cause the value to be converted. For example, -`now()::DATE` converts the current timestamp to a date value (and -discards the current time), whereas `now():::DATE` triggers an error -message (that `now()` does not have type `DATE`). - -Check our blog for -[more information about context-dependent typing](https://www.cockroachlabs.com/blog/revisiting-sql-typing-in-cockroachdb/). - -#### Typing rule - -The operand must be implicitly coercible to the given type. -The result has the given type. - -## See Also - -- [Constants](sql-constants.html) -- [Table Expressions](table-expressions.html) -- [Data Types](data-types.html) diff --git a/src/current/v1.0/sql-faqs.md b/src/current/v1.0/sql-faqs.md deleted file mode 100644 index 8715acec395..00000000000 --- a/src/current/v1.0/sql-faqs.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: SQL FAQs -summary: Get answers to frequently asked questions about CockroachDB SQL. -toc: true ---- - - -## How do I bulk insert data into CockroachDB? - -Currently, you can bulk insert data with batches of [`INSERT`](insert.html) statements not exceeding a few MB. The size of your rows determines how many you can use, but 1,000 - 10,000 rows typically works best. For more details, see [Import Data](import-data.html). - -## How do I auto-generate unique row IDs in CockroachDB? - -{% include {{ page.version.version }}/faq/auto-generate-unique-ids.html %} - -## How do I get the last ID/SERIAL value inserted into a table? - -There’s no function in CockroachDB for returning last inserted values, but you can use the [`RETURNING` clause](insert.html#insert-and-return-values) of the `INSERT` statement. - -For example, this is how you’d use `RETURNING` to return an auto-generated [`SERIAL`](serial.html) value: - -~~~ sql -> CREATE TABLE users (id SERIAL, name STRING); - -> INSERT INTO users (name) VALUES ('mike') RETURNING id; -~~~ - -## Does CockroachDB support `JOIN`? - -CockroachDB has basic, non-optimized support for SQL `JOIN`, whose performance we're working to improve. 
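For example, a straightforward inner join works as expected, even though it is not yet optimized; a minimal sketch with hypothetical `customers` and `orders` tables:

~~~ sql
> SELECT c.name, o.total
  FROM customers AS c
  JOIN orders AS o ON o.customer_id = c.id;
~~~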
- -To learn more, see our blog posts on CockroachDB's JOINs: -- [Modesty in Simplicity: CockroachDB's JOIN](https://www.cockroachlabs.com/blog/cockroachdbs-first-join/). -- [On the Way to Better SQL Joins](https://www.cockroachlabs.com/blog/better-sql-joins-in-cockroachdb/) - -## When should I use interleaved tables? - -[Interleaving tables](interleave-in-parent.html) improves query performance by optimizing the key-value structure of closely related tables, attempting to keep data on the same key-value range if it's likely to be read and written together. - -{% include {{ page.version.version }}/faq/when-to-interleave-tables.html %} - -## Does CockroachDB support JSON or Protobuf datatypes? - -Not currently, but [we plan to offer JSON/Protobuf datatypes](https://github.com/cockroachdb/cockroach/issues/2969). - -## How do I know which index CockroachDB will select for a query? - -To see which indexes CockroachDB is using for a given query, you can use the [`EXPLAIN`](explain.html) statement, which will print out the query plan, including any indexes that are being used: - -~~~ sql -> EXPLAIN SELECT col1 FROM tbl1; -~~~ - -If you'd like to tell the query planner which index to use, you can do so via some [special syntax for index hints](select.html#force-index-selection-index-hints): - -~~~ sql -> SELECT col1 FROM tbl1@idx1; -~~~ - -## How do I log SQL queries? - -For production clusters, the best way to log queries is to turn on the [cluster-wide setting](cluster-settings.html) `sql.trace.log_statement_execute`: - -~~~ sql -> SET CLUSTER SETTING sql.trace.log_statement_execute = true; -~~~ - -With this setting on, each node of the cluster writes all SQL queries it executes to its log file. When you no longer need to log queries, you can turn the setting back off: - -~~~ sql -> SET CLUSTER SETTING sql.trace.log_statement_execute = false; -~~~ - -Alternatively, if you are testing CockroachDB locally and want to log queries executed just by a specific node, you can pass `--vmodule=executor=2` to the [`cockroach start`](start-a-node.html) command when starting the node. For example, to start a single node locally and log all SQL queries it executes, you'd run: - -~~~ shell -$ cockroach start --insecure --host=localhost --vmodule=executor=2 -~~~ - -## Does CockroachDB support a UUID type? - -Not at this time, but storing a 16-byte array in a [`BYTES`](bytes.html) column should perform just as well. - -## How does CockroachDB sort results when `ORDER BY` is not used? - -When an [`ORDER BY`](select.html#sorting-retrieved-values) clause is not used in a `SELECT` query, retrieved rows are not sorted by any consistent criteria. Instead, CockroachDB returns them as the coordinating node receives them. - -## Why are my `INT` columns returned as strings in JavaScript? - -In CockroachDB, all `INT`s are represented with 64 bits of precision, but JavaScript numbers only have 53 bits of precision. This means that large integers stored in CockroachDB are not exactly representable as JavaScript numbers. For example, JavaScript will round the integer `235191684988928001` to the nearest representable value, `235191684988928000`. Notice that the last digit is different. This is particularly problematic when using the `unique_rowid()` [function](functions-and-operators.html), since `unique_rowid()` nearly always returns integers that require more than 53 bits of precision to represent. 
- -To avoid this loss of precision, Node's [`pg` driver](https://github.com/brianc/node-postgres) will, by default, return all CockroachDB `INT`s as strings. - -~~~ javascript -// Schema: CREATE TABLE users (id INT DEFAULT unique_rowid(), name STRING); -pgClient.query("SELECT id FROM users WHERE name = 'Roach' LIMIT 1", function(err, res) { - var idString = res.rows[0].id; - // idString === '235191684988928001' - // typeof idString === 'string' -}); -~~~ - -To perform another query using the value of `idString`, you can simply use `idString` directly, even where an `INT` type is expected. The string will automatically be coerced into a CockroachDB `INT`. - -~~~ javascript -pgClient.query("UPDATE users SET name = 'Ms. Roach' WHERE id = $1", [idString], function(err, res) { - // All should be well! -}); -~~~ - -If you instead need to perform arithmetic on `INT`s in JavaScript, you will need to use a big integer library like [Long.js](https://www.npmjs.com/package/long). Do _not_ use the built-in `parseInt` function. - -~~~ javascript -parseInt(idString, 10) + 1; // WRONG: returns 235191684988928000 -require('long').fromString(idString).add(1).toString(); // GOOD: returns '235191684988928002' -~~~ - -## See Also - -- [Product FAQs](frequently-asked-questions.html) -- [Operational FAQS](operational-faqs.html) diff --git a/src/current/v1.0/sql-feature-support.md b/src/current/v1.0/sql-feature-support.md deleted file mode 100644 index 5fceab0379f..00000000000 --- a/src/current/v1.0/sql-feature-support.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: SQL Feature Support in CockroachDB v1.0 -summary: Find CockroachDB's conformance to the SQL standard and which common extensions it supports. -toc: true ---- - -Making CockroachDB easy to use is a top priority for us, so we chose to implement SQL. However, even though SQL has a standard, no database implements all of it, nor do any of them have standard implementations of all features. - -To understand which standard SQL features we support (as well as common extensions to the standard), use the table below. - -- **Component** lists the components that are commonly considered part of SQL. -- **Supported** shows CockroachDB's level of support for the component. -- **Type** indicates whether the component is part of the SQL *Standard* or is an *Extension* created by ourselves or others. -- **Details** provides greater context about the component. - - - -## Features - -### Row Values - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Identifiers | ✓ | Standard | [Identifiers documentation](keywords-and-identifiers.html#identifiers) | -| `INT` | ✓ | Standard | [`INT` documentation](int.html) | -| `FLOAT`, `REAL` | ✓ | Standard | [`FLOAT` documentation](float.html) | -| `BOOLEAN` | ✓ | Standard | [`BOOL` documentation](bool.html) | -| `DECIMAL`, `NUMERIC` | ✓ | Standard | [`DECIMAL` documentation](decimal.html) | -| `NULL` | ✓ | Standard | [*NULL*-handling documentation](null-handling.html) | -| `BYTES` | ✓ | CockroachDB Extension | [`BYTES` documentation](bytes.html) | -| Automatic key generation | ✓ | Common Extension | [`SERIAL` documentation](serial.html) | -| `STRING`, `CHARACTER` | ✓ | Standard | [`STRING` documentation](string.html) | -| `COLLATE` | ✓ | Standard | [`COLLATE` documentation](collate.html) | -| `AUTO INCREMENT` | Alternative | Common Extension | [`SERIAL`](serial.html) replaces support for this component. 
| -| Key-value pairs | Alternative | Extension | [Key-Value FAQ](frequently-asked-questions.html#can-i-use-cockroachdb-as-a-key-value-store) | -| Arrays | Planned | Standard | [GitHub Issue tracking array support](https://github.com/cockroachdb/cockroach/issues/2115) | -| JSON | Planned | Common Extension | [GitHub Issue tracking JSON support](https://github.com/cockroachdb/cockroach/issues/2969) | -| XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XML parsing. | -| `UNSIGNED INT` | ✗ | Common Extension | `UNSIGNED INT` causes numerous casting issues, so we do not plan to support it. | -| `SET`, `ENUM` | ✗ | MySQL, PostgreSQL Extension | Only allow rows to contain values from a defined set of terms. | - -### Constraints - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Not Null | ✓ | Standard | [Not Null documentation](not-null.html) | -| Unique | ✓ | Standard | [Unique documentation](unique.html) | -| Primary Key | ✓ | Standard | [Primary Key documentation](primary-key.html) | -| Check | ✓ | Standard | [Check documentation](check.html) | -| Foreign Key | ✓ | Standard | [Foreign Key documentation](foreign-key.html) | -| Default Value | ✓ | Standard | [Default Value documentation](default-value.html) | - -### Transactions - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Transactions (ACID semantics) | ✓ | Standard | [Transactions documentation](transactions.html) | -| `BEGIN` | ✓ | Standard | [`BEGIN` documentation](begin-transaction.html) | -| `COMMIT` | ✓ | Standard | [`COMMIT` documentation](commit-transaction.html) | -| `ROLLBACK` | ✓ | Standard | [`ROLLBACK` documentation](rollback-transaction.html) | -| `SAVEPOINT` | ✓ | CockroachDB Extension | While `SAVEPOINT` is part of the SQL standard, we only support [our extension of it](transactions.html#transaction-retries) | - -### Indexes - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Indexes | ✓ | Common Extension | [Indexes documentation](indexes.html) | -| Multi-column indexes | ✓ | Common Extension | We do not limit on the number of columns indexes can include | -| Covering indexes | ✓ | Common Extension | [Storing Columns documentation](create-index.html#store-columns) | -| Multiple indexes per query | Planned | Common Extension | Use multiple indexes to filter the table's values for a single query | -| Full-text indexes | Planned | Common Extension | [GitHub Issue tracking full-text index support](https://github.com/cockroachdb/cockroach/issues/7821) | -| Prefix/Expression Indexes | Potential | Common Extension | Apply expressions (such as `LOWER()`) to values before indexing them | -| Geospatial indexes | Potential | Common Extension | Improves performance of queries calculating geospatial data | -| Hash indexes | ✗ | Common Extension | Improves performance of queries looking for single, exact values | -| Partial indexes | ✗ | Common Extension | Only index specific rows from indexed columns | - -### Schema Changes - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| `ALTER TABLE` | ✓ | Standard | [`ALTER TABLE` documentation](alter-table.html) | -| Database renames | ✓ | Standard | [`RENAME DATABASE` documentation](rename-database.html) | -| Table renames | ✓ | Standard | [`RENAME TABLE` documentation](rename-table.html) | -| Column renames | ✓ | Standard | [`RENAME COLUMN` documentation](rename-column.html) | -| Adding columns | ✓ | 
Standard | [`ADD COLUMN` documentation](add-column.html) | -| Removing columns | ✓ | Standard | [`DROP COLUMN` documentation](drop-column.html) | -| Adding constraints | ✓ | Standard | [`ADD CONSTRAINT` documentation](add-constraint.html) | -| Removing constraints | ✓ | Standard | [`DROP CONSTRAINT` documentation](drop-constraint.html) | -| Index renames | ✓ | Standard | [`RENAME INDEX` documentation](rename-index.html) | -| Adding indexes | ✓ | Standard | [`CREATE INDEX` documentation](create-index.html) | -| Removing indexes | ✓ | Standard | [`DROP INDEX` documentation](drop-index.html) | - -### Statements - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Common statements | ✓ | Standard | [SQL Statements documentation](sql-statements.html) | -| `UPSERT` | ✓ | PostgreSQL, MSSQL Extension | [`UPSERT` documentation](upsert.html) | -| `EXPLAIN` | ✓ | Common Extension | [`EXPLAIN` documentation](explain.html) | -| `SELECT INTO` | Alternative | Common Extension | You can replicate similar functionality using [`CREATE TABLE`](create-table.html) and then `INSERT INTO ... SELECT ...`. | - -### Clauses - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Common clauses | ✓ | Standard | [SQL Grammar documentation](sql-grammar.html) | -| `LIMIT` | ✓ | Common Extension | Limit the number of rows a statement returns. | -| `LIMIT` with `OFFSET` | ✓ | Common Extension | Skip a number of rows, and then limit the size of the return set. | -| `RETURNING` | ✓ | Common Extension | Retrieve a table of rows statements affect. | - -### Table Expressions - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Table and View references | ✓ | Standard | [Table expressions documentation](table-expressions.html#table-or-view-names) | -| `AS` in table expressions | ✓ | Standard | [Aliased table expressions documentation](table-expressions.html#aliased-table-expressions) | -| `JOIN` (`INNER`, `LEFT`, `RIGHT`, `FULL`, `CROSS`) | [Functional](https://www.cockroachlabs.com/blog/better-sql-joins-in-cockroachdb/) | Standard | [Join expressions documentation](table-expressions.html#join-expressions) | -| Sub-queries as table expressions | Partial | Standard | Non-correlated subqueries are [supported](table-expressions.html#subqueries-as-table-expressions); correlated are not. 
| -| Table generator functions | Partial | PostgreSQL Extension | [Table generator functions documentation](table-expressions.html#table-generator-functions) | -| `WITH ORDINALITY` | ✓ | CockroachDB Extension | [Ordinality annotation documentation](table-expressions.html#ordinality-annotation) | - -### Value Expressions and Boolean Formulas - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Common functions | ✓ | Standard | [Functions calls and SQL special forms documentation](sql-expressions.html#function-calls-and-sql-special-forms) -| Common operators | ✓ | Standard | [Operators documentation](sql-expressions.html#unary-and-binary-operations) | -| `IF`/`CASE`/`NULLIF` | ✓ | Standard | [Conditional expressions documentation](sql-expressions.html#conditional-expressions) | -| `COALESCE`/`IFNULL` | ✓ | Standard | [Conditional expressions documentation](sql-expressions.html#conditional-expressions) | -| `AND`/`OR`/`NOT` | ✓ | Standard | [Logical operators documentation](sql-expressions.html#logical-operators) | -| `LIKE`/`ILIKE` | ✓ | Standard | [String pattern matching documentation](sql-expressions.html#string-pattern-matching) | -| `SIMILAR TO` | ✓ | Standard | [SQL regexp pattern matching documentation](sql-expressions.html#string-matching-using-sql-regular-expressions) | -| Matching using POSIX regular expressions | ✓ | Common Extension | [POSIX regexp pattern matching documentation](sql-expressions.html#string-matching-using-posix-regular-expressions) | -| `EXISTS` | Partial | Standard | Non-correlated subqueries are [supported](sql-expressions.html#existence-test-on-the-result-of-subqueries); correlated are not. Currently works only with small data sets. | -| Scalar subqueries | Partial | Standard | Non-correlated subqueries are [supported](sql-expressions.html#scalar-subqueries); correlated are not. Currently works only with small data sets. 
| -| Bitwise arithmetic | ✓ | Common Extension | [Operators documentation](sql-expressions.html#unary-and-binary-operations) | -| Array constructors and subscripting | Partial | PostgreSQL Extension | Array expression documentation: [Constructor syntax](sql-expressions.html#array-constructors) and [Subscripting](sql-expressions.html#subscripted-expressions) | -| `COLLATE`| ✓ | Standard | [Collation expressions documentation](sql-expressions.html#collation-expressions) | -| Column ordinal references | ✓ | CockroachDB Extension | [Column references documentation](sql-expressions.html#column-references) | -| Type annotations | ✓ | CockroachDB Extension | [Type annotations documentation](sql-expressions.html#explicitly-typed-expressions) | - -### Permissions - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Users | ✓ | Standard | [`GRANT` documentation](grant.html) | -| Privileges | ✓ | Standard | [Privileges documentation](privileges.html) | - -### Miscellaneous - -| Component | Supported | Type | Details | -|-----------|-----------|------|---------| -| Column families | ✓ | CockroachDB Extension | [Column Families documentation](column-families.html) | -| Interleaved tables | ✓ | CockroachDB Extension | [Interleaved Tables documentation](interleave-in-parent.html) | -| Information Schema | ✓ | Standard | [Information Schema documentation](information-schema.html) -| Views | ✓ | Standard | [Views documentation](views.html) | -| Window functions | ✓ | Standard | [Window Functions documentation](window-functions.html) | -| Common Table Expressions | Planned | Common Extension | Also known as CTEs or `WITH` clauses. [GitHub issue tracking common table expressions support.](https://github.com/cockroachdb/cockroach/issues/7029) | -| Stored Procedures | Planned | Common Extension | Execute a procedure explicitly. | -| Cursors | ✗ | Standard | Traverse a table's rows. | -| Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. | -| Sequences | ✗ | Common Extension | Create a numeric sequence. Given CockroachDB's distributed architecture, sequences would be expensive. For fast, globally unique key generation, see [`SERIAL`](serial.html). | diff --git a/src/current/v1.0/sql-grammar.md b/src/current/v1.0/sql-grammar.md deleted file mode 100644 index 7636056ace1..00000000000 --- a/src/current/v1.0/sql-grammar.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: SQL Grammar -summary: The full SQL grammar for CockroachDB, generated automatically from the CockroachDB code. -toc: false -back_to_top: true ---- - - - -{{site.data.alerts.callout_success}} -This page describes the full CockroachDB SQL grammar. However, as a starting point, it's best to reference our SQL statements pages first, which provide detailed explanations and examples. -{{site.data.alerts.end}} - -{% comment %} -TODO: clean up the SQL diagrams not to link to these missing nonterminals. -{% endcomment %} - - - - - - - - - - - -
    - {% include {{ page.version.version }}/sql/diagrams/grammar.html %} -
    diff --git a/src/current/v1.0/sql-name-resolution.md b/src/current/v1.0/sql-name-resolution.md deleted file mode 100644 index 7d0c578fe93..00000000000 --- a/src/current/v1.0/sql-name-resolution.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Name Resolution -summary: Table and function names can exist in multiple places. Resolution decides which one to use. -toc: true ---- - -A SQL client can have access to multiple databases side-by-side. The -same table name (e.g., `orders`) can exist in multiple -databases. When a query specifies a table name without a database -name (e.g., `SELECT * FROM orders`), how does CockroachDB know -which `orders` table is being considered? - -This page details how CockroachDB performs **name resolution** to answer -this question. - - -## Overview - -The following **name resolution algorithm** is used both to determine -table names in [table expressions](table-expressions.html) and -function names in [value expressions](sql-expressions.html): - -- If the name is *qualified* (i.e., the name already tells where to look), use this information. - For example, `SELECT * FROM db1.orders` will look up "`orders`" only in `db1`. -- If the name is *unqualified*: - - Try to find the name in the "default database" as set by [`SET DATABASE`](set-vars.html). - - Try to find the name using the [search path](#search-path). - - If the name is not found, produce an error. - -## Search Path - -In addition to the default database configurable via [`SET DATABASE`](set-vars.html), unqualified names are also looked up in the current session's *search path*. - -The search path is a session variable containing a list of databases, -or *namespaces*, where names are looked up. - -The current search path can set using `SET SEARCH_PATH` and can be inspected using [`SHOW SEARCH_PATH` or `SHOW ALL`](show-vars.html). - -By default, the search path for new columns includes just -`pg_catalog`, so that queries can use PostgreSQL compatibility -functions and virtual tables in that namespace without the need to -prefix them with "`pg_catalog.`" every time. - -## See Also - -- [`SET`](set-vars.html) -- [`SHOW`](show-vars.html) diff --git a/src/current/v1.0/sql-statements.md b/src/current/v1.0/sql-statements.md deleted file mode 100644 index f0ea34dfc5d..00000000000 --- a/src/current/v1.0/sql-statements.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: SQL Statements -summary: Overview of SQL statements supported by CockroachDB. -toc: true ---- - -CockroachDB supports the following SQL statements. Click a statement for more details. - - -## Data Manipulation Statements - -Statement | Usage -----------|------------ -[`CREATE TABLE AS`](create-table-as.html) | Create a new table in a database using the results from a `SELECT` statement. -[`DELETE`](delete.html) | Delete specific rows from a table. -[`EXPLAIN`](explain.html) | View debugging and analysis details for a `SELECT`, `INSERT`, `UPDATE`, or `DELETE` statement. -[`INSERT`](insert.html) | Insert rows into a table. -[`SELECT`](select.html) | Select rows from a table. -[`TRUNCATE`](truncate.html) | Deletes all rows from specified tables. -[`UPDATE`](update.html) | Update rows in a table. -[`UPSERT`](upsert.html) | Insert rows that do not violate uniqueness constraints; update rows that do. - -## Data Definition Statements - -Statement | Usage -----------|------------ -[`ADD COLUMN`](add-column.html) | Add columns to a table. -[`ADD CONSTRAINT`](add-constraint.html) | Add a constraint to a column. 
-[`ALTER COLUMN`](alter-column.html) | Change a column's [Default constraint](default-value.html) or drop the [Not Null constraint](not-null.html). -[`ALTER TABLE`](alter-table.html) | Apply a schema change to a table. -[`ALTER VIEW`](alter-view.html) | Rename a view. -[`CREATE DATABASE`](create-database.html) | Create a new database. -[`CREATE INDEX`](create-index.html) | Create an index for a table. -[`CREATE TABLE`](create-table.html) | Create a new table in a database. -[`CREATE TABLE AS`](create-table-as.html) | Create a new table in a database using the results from a `SELECT` statement. -[`CREATE VIEW`](create-view.html) | Create a new [view](views.html) in a database. -[`DROP COLUMN`](drop-column.html) | Remove columns from a table. -[`DROP CONSTRAINT`](drop-constraint.html) | Remove constraints from a column. -[`DROP DATABASE`](drop-database.html) | Remove a database and all its objects. -[`DROP INDEX`](drop-index.html) | Remove an index for a table. -[`DROP TABLE`](drop-table.html) | Remove a table. -[`DROP VIEW`](drop-view.html)| Remove a view. -[`RENAME COLUMN`](rename-column.html) | Rename a column in a table. -[`RENAME DATABASE`](rename-database.html) | Rename a database. -[`RENAME INDEX`](rename-index.html) | Rename an index for a table. -[`RENAME TABLE`](rename-table.html) | Rename a table or move a table between databases. -[`SHOW COLUMNS`](show-columns.html) | View details about columns in a table. -[`SHOW CONSTRAINTS`](show-constraints.html) | List constraints on a table. -[`SHOW CREATE TABLE`](show-create-table.html) | View the `CREATE TABLE` statement that would create a carbon copy of the specified table. -[`SHOW CREATE VIEW`](show-create-view.html) | View the `CREATE VIEW` statement that would create a carbon copy of the specified view. -[`SHOW DATABASES`](show-databases.html) | List databases in the cluster. -[`SHOW INDEX`](show-index.html) | View index information for a table. -[`SHOW TABLES`](show-tables.html) | List tables in a database. - -## Transaction Management Statements - -Statement | Usage -----------|------------ -[`BEGIN`](begin-transaction.html)| Initiate a [transaction](transactions.html). -[`COMMIT`](commit-transaction.html) | Commit the current [transaction](transactions.html). -[`RELEASE SAVEPOINT`](release-savepoint.html) | When using the CockroachDB-provided function for client-side [transaction retries](transactions.html#transaction-retries), commit the transaction's changes once there are no retryable errors. -[`ROLLBACK`](rollback-transaction.html) | Discard all updates made by the current [transaction](transactions.html) or, when using the CockroachDB-provided function for client-side [transaction retries](transactions.html#transaction-retries), rollback to the `cockroach_restart` savepoint and retry the transaction. -[`SAVEPOINT`](savepoint.html) | When using the CockroachDB-provided function for client-side [transaction retries](transactions.html#transaction-retries), start a retryable transaction. -[`SET TRANSACTION`](set-transaction.html) | Set the isolation level or priority for the session or for an individual [transaction](transactions.html). -[`SHOW`](show-vars.html) | View the current [transaction settings](transactions.html). - -## Privilege Management Statements - -Statement | Usage -----------|------------ -[`CREATE USER`](create-user.html) | Creates a SQL user, which lets you control [privileges](privileges.html) on your databases and tables. -[`GRANT`](grant.html) | Grant privileges to users. 
-[`REVOKE`](revoke.html) | Revoke privileges from users. -[`SHOW GRANTS`](show-grants.html) | View privileges granted to users. -[`SHOW USERS`](show-users.html) | Lists the users for all databases. - -## Session Management Statements - -Statement | Usage -----------|------------ -[`SET`](set-vars.html) | Set the current session parameters. -[`SET TRANSACTION`](set-transaction.html) | Set the isolation level or priority for an individual [transaction](transactions.html). -[`SHOW`](show-vars.html) | List the current session or transaction settings. - -## Cluster Management Statements - -Statement | Usage -----------|------------ -[`SHOW ALL CLUSTER SETTINGS`](cluster-settings.html#view-current-cluster-settings) | List the current cluster-wide settings. -[`SET CLUSTER SETTING`](cluster-settings.html#change-a-cluster-setting) | Set a cluster-wide setting. - -## Backup & Restore Statements (Enterprise) - -The following statements are available only to [enterprise license](https://www.cockroachlabs.com/pricing/) users. - -{{site.data.alerts.callout_info}}For non-enterprise users, see Back up Data and Restore Data.{{site.data.alerts.end}} - -Statement | Usage -----------|------------ -[`BACKUP`](backup.html) | Create disaster recovery backups of databases and tables. -[`RESTORE`](restore.html) | Restore databases and tables using your backups. diff --git a/src/current/v1.0/sql.md b/src/current/v1.0/sql.md deleted file mode 100644 index 8ebc30575ce..00000000000 --- a/src/current/v1.0/sql.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: SQL -summary: CockroachDB's external API is Standard SQL with extensions. -toc: false ---- - -At the lowest level, CockroachDB is a distributed, strongly-consistent, transactional key-value store, but the external API is [Standard SQL with extensions](sql-feature-support.html). This provides developers familiar relational concepts such as schemas, tables, columns, and indexes and the ability to structure, manipulate, and query data using well-established and time-proven tools and processes. Also, since CockroachDB supports the PostgreSQL wire protocol, it’s simple to get your application talking to Cockroach; just find your [PostgreSQL language-specific driver](install-client-drivers.html) and start building. - -## See Also - -- [SQL Feature Support](sql-feature-support.html) -- [Learn CockroachDB SQL](learn-cockroachdb-sql.html) -- [Use the Built-In SQL Client](use-the-built-in-sql-client.html) -- [SQL in CockroachDB: Mapping Table Data to Key-Value Storage](https://www.cockroachlabs.com/blog/sql-in-cockroachdb-mapping-table-data-to-key-value-storage/) -- [Index Selection in CockroachDB](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/) \ No newline at end of file diff --git a/src/current/v1.0/start-a-local-cluster-in-docker.md b/src/current/v1.0/start-a-local-cluster-in-docker.md deleted file mode 100644 index b38d644c4f9..00000000000 --- a/src/current/v1.0/start-a-local-cluster-in-docker.md +++ /dev/null @@ -1,270 +0,0 @@ ---- -title: Start a Cluster in Docker (Insecure) -summary: Run an insecure multi-node CockroachDB cluster across multiple Docker containers on a single host. -toc: false -allowed_hashes: [os-mac, os-linux, os-windows] ---- - - - -
    - -Once you've [installed the official CockroachDB Docker image](install-cockroachdb.html), it's simple to run an insecure multi-node cluster across multiple Docker containers on a single host, using Docker volumes to persist node data. - -{{site.data.alerts.callout_danger}}Running a stateful application like CockroachDB in Docker is more complex and error-prone than most uses of Docker and is not recommended for production deployments. To run a physically distributed cluster in containers, use an orchestration tool like Kubernetes or Docker Swarm. See Orchestration for more details.{{site.data.alerts.end}} - - - -
    -{% include {{ page.version.version }}/start-in-docker/mac-linux-steps.md %} - -## Step 5. Monitor the cluster - -When you started the first container/node, you mapped the node's default HTTP port `8080` to port `8080` on the host. To check out the Admin UI for your cluster, point your browser to that port on `localhost`, i.e., `http://localhost:8080`. - -CockroachDB Admin UI - -As mentioned earlier, CockroachDB automatically replicates your data behind-the-scenes. To verify that data written in the previous step was replicated successfully, scroll down to the **Replicas per Store** graph and hover over the line: - -CockroachDB Admin UI - -The replica count on each node is identical, indicating that all data in the cluster was replicated 3 times (the default). - -{{site.data.alerts.callout_success}}For more insight into how CockroachDB automatically replicates and rebalances data, and tolerates and recovers from failures, see our replication, rebalancing, fault tolerance demos.{{site.data.alerts.end}} - -## Step 6. Stop the cluster - -Use the `docker stop` and `docker rm` commands to stop and remove the containers (and therefore the cluster): - -{% include copy-clipboard.html %} -~~~ shell -$ docker stop roach1 roach2 roach3 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ docker rm roach1 roach2 roach3 -~~~ - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores: - -{% include copy-clipboard.html %} -~~~ shell -$ rm -rf cockroach-data -~~~ -
    - -
    -{% include {{ page.version.version }}/start-in-docker/mac-linux-steps.md %} - -## Step 5. Monitor the cluster - -When you started the first container/node, you mapped the node's default HTTP port `8080` to port `8080` on the host. To check out the Admin UI for your cluster, point your browser to that port on `localhost`, i.e., `http://localhost:8080`. - -CockroachDB Admin UI - -As mentioned earlier, CockroachDB automatically replicates your data behind-the-scenes. To verify that data written in the previous step was replicated successfully, scroll down to the **Replicas per Store** graph and hover over the line: - -CockroachDB Admin UI - -The replica count on each node is identical, indicating that all data in the cluster was replicated 3 times (the default). - -{{site.data.alerts.callout_success}}For more insight into how CockroachDB automatically replicates and rebalances data, and tolerates and recovers from failures, see our replication, rebalancing, fault tolerance demos.{{site.data.alerts.end}} - -## Step 6. Stop the cluster - -Use the `docker stop` and `docker rm` commands to stop and remove the containers (and therefore the cluster): - -{% include copy-clipboard.html %} -~~~ shell -$ docker stop roach1 roach2 roach3 -~~~ - -{% include copy-clipboard.html %} -~~~ shell -$ docker rm roach1 roach2 roach3 -~~~ - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores: - -{% include copy-clipboard.html %} -~~~ shell -$ rm -rf cockroach-data -~~~ -
    - -
    -## Before You Begin - -If you have not already installed the official CockroachDB Docker image, go to [Install CockroachDB](install-cockroachdb.html) and follow the instructions under **Use Docker**. - -## Step 1. Create a bridge network - -Since you'll be running multiple Docker containers on a single host, with one CockroachDB node per container, you need to create what Docker refers to as a [bridge network](https://docs.docker.com/engine/userguide/networking/#/a-bridge-network). The bridge network will enable the containers to communicate as a single cluster while keeping them isolated from external networks. - -
    PS C:\Users\username> docker network create -d bridge roachnet
    - -We've used `roachnet` as the network name here and in subsequent steps, but feel free to give your network any name you like. - -## Step 2. Start the first node - -{{site.data.alerts.callout_info}}Be sure to replace <username> in the -v flag with your actual username.{{site.data.alerts.end}} - -
    PS C:\Users\username> docker run -d `
    ---name=roach1 `
    ---hostname=roach1 `
    ---net=roachnet `
    --p 26257:26257 -p 8080:8080 `
    --v "//c/Users/<username>/cockroach-data/roach1:/cockroach/cockroach-data" `
    -{{page.release_info.docker_image}}:{{page.release_info.version}} start --insecure
    - -This command creates a container and starts the first CockroachDB node inside it. Let's look at each part: - -- `docker run`: The Docker command to start a new container. -- `-d`: This flag runs the container in the background so you can continue the next steps in the same shell. -- `--name`: The name for the container. This is optional, but a custom name makes it significantly easier to reference the container in other commands, for example, when opening a Bash session in the container or stopping the container. -- `--hostname`: The hostname for the container. You will use this to join other containers/nodes to the cluster. -- `--net`: The bridge network for the container to join. See step 1 for more details. -- `-p 26257:26257 -p 8080:8080`: These flags map the default port for inter-node and client-node communication (`26257`) and the default port for HTTP requests to the Admin UI (`8080`) from the container to the host. This enables inter-container communication and makes it possible to call up the Admin UI from a browser. -- `-v "//c/Users//cockroach-data/roach1:/cockroach/cockroach-data"`: This flag mounts a host directory as a data volume. This means that data and logs for this node will be stored in `Users//cockroach-data/roach1` on the host and will persist after the container is stopped or deleted. For more details, see Docker's Bind Mounts topic. -- `{{page.release_info.docker_image}}:{{page.release_info.version}} start --insecure`: The CockroachDB command to [start a node](start-a-node.html) in the container in insecure mode. - - {{site.data.alerts.callout_success}}By default, each node's cache is limited to 25% of available memory. This default is reasonable when running one container/node per host. When running multiple containers/nodes on a single host, however, it may lead to out of memory errors, especially when testing against the cluster in a serious way. To avoid such errors, you can manually limit each node's cache size by setting the --cache flag in the start command.{{site.data.alerts.end}} - -## Step 3. Add nodes to the cluster - -At this point, your cluster is live and operational. With just one node, you can already connect a SQL client and start building out your database. In real deployments, however, you'll always want 3 or more nodes to take advantage of CockroachDB's [automatic replication](demo-data-replication.html), [rebalancing](demo-automatic-rebalancing.html), and [fault tolerance](demo-fault-tolerance-and-recovery.html) capabilities. - -To simulate a real deployment, scale your cluster by adding two more nodes: - -{{site.data.alerts.callout_info}}Again, be sure to replace <username> in the -v flag with your actual username.{{site.data.alerts.end}} - -
    # Start the second container/node:
    -PS C:\Users\username> docker run -d `
    ---name=roach2 `
    ---hostname=roach2 `
    ---net=roachnet `
    --v "//c/Users/<username>/cockroach-data/roach2:/cockroach/cockroach-data" `
    -{{page.release_info.docker_image}}:{{page.release_info.version}} start --insecure --join=roach1
    -
    -# Start the third container/node:
    -PS C:\Users\username> docker run -d `
    ---name=roach3 `
    ---hostname=roach3 `
    ---net=roachnet `
    --v "//c/Users/<username>/cockroach-data/roach3:/cockroach/cockroach-data" `
    -{{page.release_info.docker_image}}:{{page.release_info.version}} start --insecure --join=roach1
    - -These commands add two more containers and start CockroachDB nodes inside them, joining them to the first node. There are only a few differences to note from step 2: - -- `-v`: This flag mounts a host directory as a data volume. Data and logs for these nodes will be stored in `Users//cockroach-data/roach2` and `Users//cockroach-data/roach3` on the host and will persist after the containers are stopped or deleted. -- `--join`: This flag joins the new nodes to the cluster, using the first container's `hostname`. Note that since each node is in a unique container, using identical default ports won’t cause conflicts. - -## Step 4. Test the cluster - -Now that you've scaled to 3 nodes, you can use any node as a SQL gateway to the cluster. To demonstrate this, use the `docker exec` command to start the [built-in SQL shell](use-the-built-in-sql-client.html) in the first container: - -
    PS C:\Users\username> docker exec -it roach1 ./cockroach sql --insecure
    -# Welcome to the cockroach SQL interface.
    -# All statements must be terminated by a semicolon.
    -# To exit: CTRL + D.
    - -Run some basic [CockroachDB SQL statements](learn-cockroachdb-sql.html): - -~~~ sql -> CREATE DATABASE bank; - -> CREATE TABLE bank.accounts (id INT PRIMARY KEY, balance DECIMAL); - -> INSERT INTO bank.accounts VALUES (1, 1000.50); - -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -Exit the SQL shell on node 1: - -~~~ sql -> \q -~~~ - -Then start the SQL shell in the second container: - -
    PS C:\Users\username> docker exec -it roach2 ./cockroach sql --insecure
    -# Welcome to the cockroach SQL interface.
    -# All statements must be terminated by a semicolon.
    -# To exit: CTRL + D.
    - -Now run the same `SELECT` query: - -~~~ sql -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -As you can see, node 1 and node 2 behaved identically as SQL gateways. - -When you're done, exit the SQL shell on node 2: - -~~~ sql -> \q -~~~ - -## Step 5. Monitor the cluster - -When you started the first container/node, you mapped the node's default HTTP port `8080` to port `8080` on the host. To check out the [Admin UI](explore-the-admin-ui.html) for your cluster, point your browser to that port on `localhost`, i.e., `http://localhost:8080`. - -CockroachDB Admin UI - -As mentioned earlier, CockroachDB automatically replicates your data behind-the-scenes. To verify that data written in the previous step was replicated successfully, scroll down to the **Replicas per Store** graph and hover over the line: - -CockroachDB Admin UI - -The replica count on each node is identical, indicating that all data in the cluster was replicated 3 times (the default). - -{{site.data.alerts.callout_success}}For more insight into how CockroachDB automatically replicates and rebalances data, and tolerates and recovers from failures, see our replication, rebalancing, fault tolerance demos.{{site.data.alerts.end}} - -## Step 6. Stop the cluster - -Use the `docker stop` and `docker rm` commands to stop and remove the containers (and therefore the cluster): - -
    # Stop the containers:
    -PS C:\Users\username> docker stop roach1 roach2 roach3
    -
    -# Remove the containers:
    -PS C:\Users\username> docker rm roach1 roach2 roach3
    - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores: - -
    PS C:\Users\username> Remove-Item cockroach-data -recurse
    - -
    - -## What's Next? - -- Learn more about [CockroachDB SQL](learn-cockroachdb-sql.html) and the [built-in SQL client](use-the-built-in-sql-client.html) -- [Install the client driver](install-client-drivers.html) for your preferred language -- [Build an app with CockroachDB](build-an-app-with-cockroachdb.html) -- [Explore core CockroachDB features](demo-data-replication.html) like automatic replication, rebalancing, and fault tolerance diff --git a/src/current/v1.0/start-a-local-cluster.md b/src/current/v1.0/start-a-local-cluster.md deleted file mode 100644 index 572057b0d12..00000000000 --- a/src/current/v1.0/start-a-local-cluster.md +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: Start a Local Cluster (Insecure) -summary: Run an insecure multi-node CockroachDB cluster locally with each node listening on a different port. -toc: true -toc_not_nested: true ---- - - - -Once you’ve [installed CockroachDB](install-cockroachdb.html), it’s simple to start an insecure multi-node cluster locally. - -{{site.data.alerts.callout_info}}Running multiple nodes on a single host is useful for testing out CockroachDB, but it's not recommended for production deployments. To run a physically distributed cluster in production, see Manual Deployment, Cloud Deployment, or Orchestration.{{site.data.alerts.end}} - - -## Before You Begin - -Make sure you have already [installed CockroachDB](install-cockroachdb.html). - -## Step 1. Start the first node - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---host=localhost -~~~ - -~~~ -CockroachDB node starting at {{ now | date: "%Y-%m-%d %H:%M:%S.%6 +0000 UTC" }} -build: CCL {{page.release_info.version}} @ {{page.release_info.build_time}} -admin: http://localhost:8080 -sql: postgresql://root@localhost:26257?sslmode=disable -logs: cockroach-data/logs -store[0]: path=cockroach-data -status: initialized new cluster -clusterID: {dab8130a-d20b-4753-85ba-14d8956a294c} -nodeID: 1 -~~~ - -This command starts a node in insecure mode, accepting most [`cockroach start`](start-a-node.html) defaults. - -- The `--insecure` flag makes communication unencrypted. -- Since this is a purely local cluster, `--host=localhost` tells the node to listens only on `localhost`, with default ports used for internal and client traffic (`26257`) and for HTTP requests from the Admin UI (`8080`). -- Node data is stored in the `cockroach-data` directory. -- The [standard output](start-a-node.html#standard-output) gives you helpful details such as the CockroachDB version, the URL for the admin UI, and the SQL URL for clients. - -{{site.data.alerts.callout_success}}By default, each node's cache is limited to 25% of available memory. This default is reasonable when running one node per host. When you run multiple nodes on a single host, however, this default may lead to out-of-memory errors, especially if you test in a serious way. To avoid such errors, you can limit each node's cache size by setting the --cache flag in the start command.{{site.data.alerts.end}} - -## Step 2. Add nodes to the cluster - -At this point, your cluster is live and operational. With just one node, you can already connect a SQL client and start building out your database. In real deployments, however, you'll always want 3 or more nodes to take advantage of CockroachDB's [automatic replication](demo-data-replication.html), [rebalancing](demo-automatic-rebalancing.html), and [fault tolerance](demo-fault-tolerance-and-recovery.html) capabilities. This step helps you simulate a real deployment locally. 
- -In a new terminal, add the second node: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---store=node2 \ ---host=localhost \ ---port=26258 \ ---http-port=8081 \ ---join=localhost:26257 -~~~ - -In a new terminal, add the third node: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---store=node3 \ ---host=localhost \ ---port=26259 \ ---http-port=8082 \ ---join=localhost:26257 -~~~ - -The main difference in these commands is that you use the `--join` flag to connect the new nodes to the cluster, specifying the address and port of the first node, in this case `localhost:26257`. Since you're running all nodes on the same machine, you also set the `--store`, `--port`, and `--http-port` flags to locations and ports not used by other nodes, but in a real deployment, with each node on a different machine, the defaults would suffice. - -## Step 3. Test the cluster - -Now that you've scaled to 3 nodes, you can use any node as a SQL gateway to the cluster. To demonstrate this, open a new terminal and connect the [built-in SQL client](use-the-built-in-sql-client.html) to node 1: - -{{site.data.alerts.callout_info}}The SQL client is built into the cockroach binary, so nothing extra is needed.{{site.data.alerts.end}} - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure -~~~ - -Run some basic [CockroachDB SQL statements](learn-cockroachdb-sql.html): - -{% include copy-clipboard.html %} -~~~ sql -> CREATE DATABASE bank; -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> CREATE TABLE bank.accounts (id INT PRIMARY KEY, balance DECIMAL); -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> INSERT INTO bank.accounts VALUES (1, 1000.50); -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -Exit the SQL shell on node 1: - -{% include copy-clipboard.html %} -~~~ sql -> \q -~~~ - -Then connect the SQL shell to node 2, this time specifying the node's non-default port: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure --port=26258 -~~~ - -{{site.data.alerts.callout_info}}In a real deployment, all nodes would likely use the default port 26257, and so you wouldn't need to set the --port flag.{{site.data.alerts.end}} - -Now run the same `SELECT` query: - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -As you can see, node 1 and node 2 behaved identically as SQL gateways. - -Exit the SQL shell on node 2: - -{% include copy-clipboard.html %} -~~~ sql -> \q -~~~ - -## Step 4. Monitor the cluster - -To access the [Admin UI](explore-the-admin-ui.html) for your cluster, point a browser to `http://localhost:8080`, or to the address in the `admin` field in the standard output of any node on startup: - -CockroachDB Admin UI - -As mentioned earlier, CockroachDB automatically replicates your data behind-the-scenes. To verify that data written in the previous step was replicated successfully, scroll down to the **Replicas per Node** graph and hover over the line: - -CockroachDB Admin UI - -The replica count on each node is identical, indicating that all data in the cluster was replicated 3 times (the default). 
- -{{site.data.alerts.callout_success}}For more insight into how CockroachDB automatically replicates and rebalances data, and tolerates and recovers from failures, see our replication, rebalancing, fault tolerance demos.{{site.data.alerts.end}} - -## Step 5. Stop the cluster - -Once you're done with your test cluster, switch to the terminal running the first node and press **CTRL-C** to stop the node. - -At this point, with 2 nodes still online, the cluster remains operational because a majority of replicas are available. To verify that the cluster has tolerated this "failure", connect the built-in SQL shell to nodes 2 or 3. You can do this in the same terminal or in a new terminal. - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach sql --insecure --port=26258 -~~~ - -{% include copy-clipboard.html %} -~~~ sql -> SELECT * FROM bank.accounts; -~~~ - -~~~ -+----+---------+ -| id | balance | -+----+---------+ -| 1 | 1000.5 | -+----+---------+ -(1 row) -~~~ - -Exit the SQL shell: - -{% include copy-clipboard.html %} -~~~ sql -> \q -~~~ - -Now stop nodes 2 and 3 by switching to their terminals and pressing **CTRL-C**. - -{{site.data.alerts.callout_success}}For node 3, the shutdown process will take longer (about a minute) and will eventually force stop the node. This is because, with only 1 of 3 nodes left, a majority of replicas are not available, and so the cluster is no longer operational. To speed up the process, press CTRL-C a second time.{{site.data.alerts.end}} - -If you do not plan to restart the cluster, you may want to remove the nodes' data stores: - -{% include copy-clipboard.html %} -~~~ shell -$ rm -rf cockroach-data node2 node3 -~~~ - -## Step 6. Restart the cluster - -If you decide to use the cluster for further testing, you'll need to restart at least 2 of your 3 nodes from the directories containing the nodes' data stores. - -Restart the first node from the parent directory of `cockroach-data/`: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---host=localhost -~~~ - -{{site.data.alerts.callout_info}}With only 1 node back online, the cluster will not yet be operational, so you will not see a response to the above command until after you restart the second node. -{{site.data.alerts.end}} - -In a new terminal, restart the second node from the parent directory of `node2/`: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---store=node2 \ ---host=localhost \ ---port=26258 \ ---http-port=8081 \ ---join=localhost:26257 -~~~ - -In a new terminal, restart the third node from the parent directory of `node3/`: - -{% include copy-clipboard.html %} -~~~ shell -$ cockroach start --insecure \ ---store=node3 \ ---host=localhost \ ---port=26259 \ ---http-port=8082 \ ---join=localhost:26257 -~~~ - -## What's Next? - -- Learn more about [CockroachDB SQL](learn-cockroachdb-sql.html) and the [built-in SQL client](use-the-built-in-sql-client.html) -- [Install the client driver](install-client-drivers.html) for your preferred language -- [Build an app with CockroachDB](build-an-app-with-cockroachdb.html) -- [Explore core CockroachDB features](demo-data-replication.html) like automatic replication, rebalancing, fault tolerance, and cloud migration. 
diff --git a/src/current/v1.0/start-a-node.md b/src/current/v1.0/start-a-node.md deleted file mode 100644 index 1a42c4e22a6..00000000000 --- a/src/current/v1.0/start-a-node.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: Start a Node -summary: To start a new CockroachDB cluster, or add a node to an existing cluster, run the cockroach start command. -toc: true ---- - -To start a new CockroachDB cluster, or add a node to an existing cluster, run the `cockroach start` [command](cockroach-commands.html) with appropriate flags. - -{{site.data.alerts.callout_info}}Node-level settings are defined by flags passed to the cockroach start command and cannot be changed without stopping and restarting the node. In contrast, some cluster-wide settings are defined via SQL statements and can be updated anytime after a cluster has been started. For more details, see Cluster Settings.{{site.data.alerts.end}} - - -## Synopsis - -~~~ shell -# Start the first node of a cluster: -$ cockroach start - -# Add a node to a cluster: -$ cockroach start - -# View help: -$ cockroach start --help -~~~ - -## Flags - -The `start` command supports the following [general-use](#general) and -[logging](#logging) flags. All flags must be specified each time the -node is started, as they will not be remembered, with the exception of -the `--join` flag. Nevertheless, we recommend specifying -_all_ flags every time, including the `--join` flag, as that will -allow restarted nodes to join the cluster even if their data directory -was destroyed. - -{{site.data.alerts.callout_success}}When adding a node to an existing cluster, include the --join flag.{{site.data.alerts.end}} - -### General - -Flag | Description ------|----------- -`--advertise-host` | The hostname or IP address to advertise to other CockroachDB nodes. If it is a hostname, it must be resolvable from all nodes; if it is an IP address, it must be routable from all nodes.

    When this flag is not set, the node advertises the address in the `--host` flag. -`--attrs` | Arbitray strings, separated by colons, specifying node capability, which might include specialized hardware or number of cores, for example:

    `--attrs=ram:64gb`

    These can be used to influence the location of data replicas. See [Configure Replication Zones](configure-replication-zones.html#replication-constraints) for full details. -`--background` | Set this to start the node in the background. This is better than appending `&` to the command because control is returned to the shell only once the node is ready to accept requests.

    **Note:** `--background` is suitable for writing automated test suites or maintenance procedures that need a temporary server process running in the background. It is not intended to be used to start a long-running server, because it does not fully detach from the controlling terminal. Consider using a service manager or a tool like [daemon(8)](https://www.freebsd.org/cgi/man.cgi?query=daemon&sektion=8) instead. -`--cache` | The total size for caches, shared evenly if there are multiple storage devices. This can be in any bytes-based unit, for example:

    `--cache=1000000000 ----> 1000000000 bytes`
    `--cache=1GB ----> 1000000000 bytes`
    `--cache=1GiB ----> 1073741824 bytes`

    **Default:** 25% of total system memory (excluding swap), or 512MiB if the memory size cannot be determined -`--certs-dir` | The path to the [certificate directory](create-security-certificates.html). The directory must contain valid certificates if running in secure mode.

    **Default:** `${HOME}/.cockroach-certs/` -`--host` | The hostname or IP address to listen on for intra-cluster and client communication. The node will also advertise itself to other nodes using this address only if `--advertise-host` is not specified; in this case, if it is a hostname, it must be resolvable from all nodes, and if it is an IP address, it must be routable from all nodes.

    **Default:** Listen on all interfaces, but this flag can be set to listen on an external address -`--http-host` | The hostname or IP address to listen on for Admin UI HTTP requests.

    **Default:** same as `--host` -`--http-port` | The port to bind to for Admin UI HTTP requests.

    **Default:** `8080` -`--insecure` | Run in insecure mode. If this flag is not set, the `--certs-dir` flag must point to valid certificates.

    Note the following risks: An insecure cluster is open to any client that can access any node's IP addresses; any user, even `root`, can log in without providing a password; any user, connecting as `root`, can read or write any data in your cluster; and there is no network encryption or authentication, and thus no confidentiality.

    **Default:** `false` -`--join`
    `-j` | The address for connecting the node to an existing cluster. When starting the first node, leave this flag out. When starting subsequent nodes, set this flag to the address of any existing node.

    Optionally, you can specify the addresses of multiple existing nodes as a comma-separated list, using multiple `--join` flags, or using a combination of these approaches, for example:

    `--join=localhost:1234,localhost:2345`
    `--join=localhost:1234 --join=localhost:2345`
    `--join=localhost:1234,localhost:2345 --join=localhost:3456` -`--listening-url-file` | The file to which the node's SQL connection URL will be written on successful startup, in addition to being printed to the [standard output](#standard-output).

    This is particularly helpful in identifying the node's port when an unused port is assigned automatically (`--port=0`). -`--locality` | Arbitrary key-value pairs that describe the locality of the node. Locality might include country, region, datacenter, rack, etc.

    CockroachDB attempts to spread replicas evenly across the cluster based on locality, with the order determining the priority. The keys themselves and the order of key-value pairs must be the same on all nodes, for example:

    `--locality=region=east,datacenter=us-east-1`
    `--locality=region=west,datacenter=us-west-1`

    These can be used to influence the location of data replicas. See [Configure Replication Zones](configure-replication-zones.html#replication-constraints) for full details. -`--max-offset` | The maximum allowed clock offset for the cluster. If observed clock offsets exceed this limit, servers will crash to minimize the likelihood of reading inconsistent data. Increasing this value will increase the time to recovery of failures as well as the frequency of uncertainty-based read restarts.

    Note that this value must be the same on all nodes in the cluster and cannot be changed with a [rolling upgrade](upgrade-cockroach-version.html). In order to change it, first stop every node in the cluster. Then once the entire cluster is offline, restart each node with the new value.

    **Default:** 500ms -`--max-sql-memory` | The total size for storage of temporary data for SQL clients, including prepared queries and intermediate data rows during query execution. This can be in any bytes-based unit, for example:

    `--max-sql-memory=10000000000 ----> 1000000000 bytes`
    `--max-sql-memory=1GB ----> 1000000000 bytes`
    `--max-sql-memory=1GiB ----> 1073741824 bytes`

    **Default:** 25% of total system memory (excluding swap), or 512MiB if the memory size cannot be determined -`--pid-file` | The file to which the node's process ID will be written on successful startup. When this flag is not set, the process ID is not written to file. -`--port`
    `-p` | The port to bind to for internal and client communication.

    To have an unused port assigned automatically, pass `--port=0`.

    **Env Variable:** `COCKROACH_PORT`
    **Default:** `26257` -`--store`
    `-s` | The file path to a storage device and, optionally, store attributes and maximum size. When using multiple storage devices for a node, this flag must be specified separately for each device, for example:

    `--store=/mnt/ssd01 --store=/mnt/ssd02`

    For more details, see [`store`](#store) below. - -### Logging - -By default, `cockroach start` writes all messages to log files, and prints nothing to `stderr`. However, you can control the process's [logging](debug-and-error-logs.html) behavior with the following flags: - -{% include {{ page.version.version }}/misc/logging-flags.md %} - -#### Defaults - -`cockroach start` uses the equivalent values for these logging flags by default: - -- `--log-dir=/logs` -- `--logtostderr=NONE` - -This means, by default, CockroachDB writes all messages to log files, and never prints to `stderr`. - -### `store` - -The `store` flag supports the following fields. Note that commas are used to separate fields, and so are forbidden in all field values. - -{{site.data.alerts.callout_info}}In-memory storage is not suitable for production deployments at this time.{{site.data.alerts.end}} - -Field | Description -------|------------ -`type` | For in-memory storage, set this field to `mem`; otherwise, leave this field out. The `path` field must not be set when `type=mem`. -`path` | The file path to the storage device. When not setting `attr` or `size`, the `path` field label can be left out:

    `--store=/mnt/ssd01`

    When either of those fields are set, however, the `path` field label must be used:

    `--store=path=/mnt/ssd01,size=20GB`

    **Default:** `cockroach-data` -`attrs` | Arbitrary strings, separated by colons, specifying disk type or capability. These can be used to influence the location of data replicas. See [Configure Replication Zones](configure-replication-zones.html#replication-constraints) for full details.

    In most cases, node-level `--locality` or `--attrs` are preferable to store-level attributes, but this field can be used to match capabilities for storage of individual databases or tables. For example, an OLTP database would probably want to allocate space for its tables only on solid state devices, whereas append-only time series might prefer cheaper spinning drives. Typical attributes include whether the store is flash (`ssd`) or spinny disk (`hdd`), as well as speeds and other specs, for example:

    `--store=path=/mnt/hda1,attrs=hdd:7200rpm` -`size` | The maximum size allocated to the store. When this size is reached, CockroachDB attempts to rebalance data to other nodes with available capacity. When there's no capacity elsewhere, this limit will be exceeded. Also, data may be written to the store faster than the cluster can rebalance it away; in this case, as long as capacity is available elsewhere, CockroachDB will gradually rebalance data down to the store limit.

    The `size` can be specified either in a bytes-based unit or as a percentage of hard drive space, for example:

    `--store=path=/mnt/ssd01,size=10000000000 ----> 10000000000 bytes`
    `--store=path=/mnt/ssd01,size=20GB ----> 20000000000 bytes`
    `--store=path=/mnt/ssd01,size=20GiB ----> 21474836480 bytes`
    `--store=path=/mnt/ssd01,size=0.02TiB ----> 21474836480 bytes`
    `--store=path=/mnt/ssd01,size=20% ----> 20% of available space`
    `--store=path=/mnt/ssd01,size=0.2 ----> 20% of available space`
    `--store=path=/mnt/ssd01,size=.2 ----> 20% of available space`

    **Default:** 100%

    For an in-memory store, the `size` field is required and must be set to an explicit maximum, either in bytes or as a percentage of available memory, for example:

    `--store=type=mem,size=20GB`
    `--store=type=mem,size=90%` - -## Standard Output - -When you run `cockroach start`, some helpful details are printed to the standard output: - -~~~ shell -CockroachDB node starting at {{ now | date: "%Y-%m-%d %H:%M:%S.%6 +0000 UTC" }} -build: CCL {{page.release_info.version}} @ {{page.release_info.build_time}} -admin: http://ROACHs-MBP:8080 -sql: postgresql://root@ROACHs-MBP:26257?sslmode=disable -logs: node1/logs -attrs: ram:64gb -locality: datacenter=us-east1 -store[0]: path=node1,attrs=ssd -status: initialized new cluster -clusterID: 7b9329d0-580d-4035-8319-53ba8b74b213 -nodeID: 1 -~~~ - -{{site.data.alerts.callout_success}}These details are also written to the INFO log in the /logs directory in case you need to refer to them at a later time.{{site.data.alerts.end}} - -Field | Description -------|------------ -`build` | The version of CockroachDB you are running. -`admin` | The URL for accessing the Admin UI. -`sql` | The connection URL for your client. -`logs` | The directory containing debug log data. -`attrs` | If node-level attributes were specified in the `--attrs` flag, they are listed in this field. These details are potentially useful for [configuring replication zones](configure-replication-zones.html). -`locality` | If values describing the locality of the node were specified in the `--locality` field, they are listed in this field. These details are potentially useful for [configuring replication zones](configure-replication-zones.html). -`store[n]` | The directory containing store data, where `[n]` is the index of the store, e.g., `store[0]` for the first store, `store[1]` for the second store.

    If store-level attributes were specified in the `attrs` field of the [`--store`](#store) flag, they are listed in this field as well. These details are potentially useful for [configuring replication zones](configure-replication-zones.html). -`status` | Whether the node is the first in the cluster (`initialized new cluster`), joined an existing cluster for the first time (`initialized new node, joined pre-existing cluster`), or rejoined an existing cluster (`restarted pre-existing node`). -`clusterID` | The ID of the cluster.

    When trying to join a node to an existing cluster, if this ID is different than the ID of the existing cluster, the node has started a new cluster. This may be due to conflicting information in the node's data directory. For additional guidance, see the [troubleshooting](cluster-setup-troubleshooting.html#node-will-not-join-cluster) docs. -`nodeID` | The ID of the node. - -## Examples - -### Start a local cluster - -This example demonstrates starting up three nodes locally. See [Start a Local Cluster (Insecure)](start-a-local-cluster.html) and [Start a Local Cluster (Secure)](secure-a-cluster.html) for a detailed walkthrough. - -~~~ shell -# Insecure: -$ cockroach start --insecure -$ cockroach start --insecure --store=node2 --port=26258 --http-port=8081 --join=localhost:26257 -$ cockroach start --insecure --store=node3 --port=26259 --http-port=8082 --join=localhost:26257 - -# Secure: -$ cockroach start --certs-dir=certs --http-host=localhost --background -$ cockroach start --certs-dir=certs --store=node2 --port=26258 --http-host=localhost --http-port=8081 --join=localhost:26257 -$ cockroach start --certs-dir=certs --store=node3 --port=26259 --http-host=localhost --http-port=8082 --join=localhost:26257 -~~~ - -### Start a distributed cluster - -This example demonstrates starting up three nodes on different machines. Because each is on a different machine, default ports can be used without causing conflict. See [Manual Deployment](manual-deployment.html) for a detailed walkthrough. - - -~~~ shell -# Insecure: -$ cockroach start --insecure --host= -$ cockroach start --insecure --host= --join=:26257 -$ cockroach start --insecure --host= --join=:26257 - -# Secure: -$ cockroach start --certs-dir=certs --host= --http-host= -$ cockroach start --certs-dir=certs --host= --http-host= --join=:26257 -$ cockroach start --certs-dir=certs --host= --http-host= --join=:26257 -~~~ - -## See Also - -[Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/stop-a-node.md b/src/current/v1.0/stop-a-node.md deleted file mode 100644 index 2f54cd1055b..00000000000 --- a/src/current/v1.0/stop-a-node.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Stop a Node -summary: Learn how to stop a node. -toc: true ---- - -This page shows you how to use the `cockroach quit` [command](cockroach-commands.html) to stop a node. - - -## Overview - -### How It Works - -When you stop a node, CockroachDB lets the node finish in-flight requests and transfers all **range leases** off the node before shutting it down. If the node then stays offline for more than 5 minutes, the cluster considers the node dead and starts to transfer its **range replicas** to other nodes as well. - -After that, if the node comes back online, its range replicas will determine whether or not they are still valid members of replica groups. If a range replica is still valid and any data in its range has changed, it will receive updates from another replica in the group. If a range replica is no longer valid, it will be removed from the node. - -Basic terms: - -- **Range**: CockroachDB stores all user data and almost all system data in a giant sorted map of key value pairs. This keyspace is divided into "ranges", contiguous chunks of the keyspace, so that every key can always be found in a single range. -- **Range Replica:** CockroachDB replicates each range (3 times by default) and stores each replica on a different node. -- **Range Lease:** For each range, one of the replicas holds the "range lease". 
This replica, referred to as the "leaseholder", is the one that receives and coordinates all read and write requests for the range. - -### Considerations - -Before temporarily stopping a node to [upgrade its version of CockroachDB](upgrade-cockroach-version.html), if you expect the node to be offline for longer than 5 minutes, you should first set the `server.time_until_store_dead` [cluster setting](cluster-settings.html) to higher than the `5m0s` default. For example, if you think the node might be offline for up to 8 minutes, you might change this setting as follows: - -~~~ sql -> SET CLUSTER SETTING server.time_until_store_dead = 10m0s; -~~~ - -## Synopsis - -~~~ shell -# Stop a node: -$ cockroach quit - -# View help: -$ cockroach quit --help -~~~ - -## Flags - -The `quit` command supports the following [general-use](#general) and [logging](#logging) flags. - -### General - -Flag | Description ------|------------ -`--certs-dir` | The path to the [certificate directory](create-security-certificates.html). If the cluster is secure, this directory must contain a valid CA certificate and a client certificate and key for the `root` user. Client certificates for other users are not supported.

    **Env Variable:** `COCKROACH_CERTS_DIR`
    **Default:** `${HOME}/.cockroach-certs/` -`--host` | The server host to connect to. This can be the address of any node in the cluster.

    **Env Variable:** `COCKROACH_HOST`
    **Default:** `localhost` -`--insecure` | Run in insecure mode. If this flag is not set, the `--certs-dir` flag must point to valid certificates.

    **Env Variable:** `COCKROACH_INSECURE`
    **Default:** `false` -`--port` | The server port to connect to.

    **Env Variable:** `COCKROACH_PORT`
    **Default:** `26257` - -### Logging - -By default, the `quit` command logs errors to `stderr`. - -If you need to troubleshoot this command's behavior, you can change its [logging behavior](debug-and-error-logs.html). - -## Examples - -### Stop a Node from the Machine Where It's Running - -1. SSH to the machine where the node is running. - -2. If the node is running in the background and you are using a process manager for automatic restarts, use the process manager to stop the `cockroach` process without restarting it. - - If the node is running in the background and you are not using a process manager, send a kill signal to the `cockroach` process, for example: - - ~~~ shell - $ pkill cockroach - ~~~ - - If the node is running in the foreground, press `CTRL-C`. - -3. Verify that the `cockroach` process has stopped: - - ~~~ shell - $ ps aux | grep cockroach - ~~~ - - Alternately, you can check the node's logs for the message `server drained and shutdown completed`. - -### Stop a Node from Another Machine - -
    - - -
    - -
    -1. [Install the `cockroach` binary](install-cockroachdb.html) on a machine separate from the node. - -2. Create a `certs` directory and copy the CA certificate and the client certificate and key for the `root` user into the directory. - -3. Run the `cockroach quit` command without the `--decommission` flag: - - ~~~ shell - $ cockroach quit --certs-dir=certs --host=
    - ~~~ -
    - -
    -1. [Install the `cockroach` binary](install-cockroachdb.html) on a machine separate from the node. - -2. Run the `cockroach quit` command without the `--decommission` flag: - - ~~~ shell - $ cockroach quit --insecure --host=
    - ~~~ -
    - -## See Also - -- [Upgrade a Cluster's Version](upgrade-cockroach-version.html) -- [Other Cockroach Commands](cockroach-commands.html) diff --git a/src/current/v1.0/string.md b/src/current/v1.0/string.md deleted file mode 100644 index 4e570858389..00000000000 --- a/src/current/v1.0/string.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: STRING -summary: The STRING data type stores a string of Unicode characters. -toc: true ---- - -The `STRING` [data type](data-types.html) stores a string of Unicode characters. - - - - -## Aliases - -In CockroachDB, the following are aliases for `STRING`: - -- `CHARACTER` -- `CHAR` -- `VARCHAR` -- `TEXT` - -And the following are aliases for `STRING(n)`: - -- `CHARACTER(n)` -- `CHARACTER VARYING(n)` -- `CHAR(n)` -- `CHAR VARYING(n)` -- `VARCHAR(n)` - -## Length - -To limit the length of a string column, use `STRING(n)`, where `n` is the maximum number of Unicode code points (normally thought of as "characters") allowed. - -When inserting a string: - -- If the value exceeds the column's length limit, CockroachDB gives an error. -- If the value is cast as a string with a length limit (e.g., `CAST('hello world' AS STRING(5))`), CockroachDB truncates to the limit. -- If the value is under the column's length limit, CockroachDB does **not** add padding. This applies to `STRING(n)` and all its aliases. - -## Syntax - -A value of type `STRING` can be expressed using a variety of formats. -See [string literals](sql-constants.html#string-literals) for more details. - -When printing out a `STRING` value in the [SQL shell](use-the-built-in-sql-client.html), the shell uses the simple -SQL string literal format if the value doesn't contain special character, -or the escaped format otherwise. - -### Collations - -`STRING` values accept [collations](collate.html), which lets you sort strings according to language- and country-specific rules. - -{{site.data.alerts.callout_danger}}You cannot current use collated strings in indexes or primary keys; doing so causes CockroachDB to crash. If you're interested in using collated strings in these contexts, you can follow this issue on GitHub to be notified when it's resolved.{{site.data.alerts.end}} - -## Size - -The size of a `STRING` value is variable, but it's recommended to keep values under 64 kilobytes to ensure performance. Above that threshold, [write amplification](https://en.wikipedia.org/wiki/Write_amplification) and other considerations may cause significant performance degradation. - -## Examples - -~~~ sql -> CREATE TABLE strings (a STRING PRIMARY KEY, b STRING(4), c TEXT); - -> SHOW COLUMNS FROM strings; -~~~ -~~~ -+-------+-----------+-------+---------+ -| Field | Type | Null | Default | -+-------+-----------+-------+---------+ -| a | STRING | false | NULL | -| b | STRING(4) | true | NULL | -| c | STRING | true | NULL | -+-------+-----------+-------+---------+ -~~~ -~~~ sql -> INSERT INTO strings VALUES ('a1b2c3d4', 'e5f6', 'g7h8i9'); - -> SELECT * FROM strings; -~~~ -~~~ -+----------+------+--------+ -| a | b | c | -+----------+------+--------+ -| a1b2c3d4 | e5f6 | g7h8i9 | -+----------+------+--------+ -~~~ - -## Supported Casting & Conversion - -`STRING` values can be [cast](data-types.html#data-type-conversions-casts) to any of the following data types: - -Type | Details ------|-------- -`INT` | Requires supported [`INT`](int.html) string format, e.g., `'10'`. -`DECIMAL` | Requires supported [`DECIMAL`](decimal.html) string format, e.g., `'1.1'`. 
-`FLOAT` | Requires supported [`FLOAT`](float.html) string format, e.g., `'1.1'`. -`BOOL` | Requires supported [`BOOL`](bool.html) string format, e.g., `'true'`. -`DATE` | Requires supported [`DATE`](date.html) string format, e.g., `'2016-01-25'`. -`TIMESTAMP` | Requires supported [`TIMESTAMP`](timestamp.html) string format, e.g., `''2016-01-25 10:10:10.555555''`. -`INTERVAL` | Requires supported [`INTERVAL`](interval.html) string format, e.g., `'1h2m3s4ms5us6ns'`. -`BYTES` | Requires supported [`BYTES`](bytes.html) string format, e.g., `b'\141\061\142\062\143\063'`. - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/strong-consistency.md b/src/current/v1.0/strong-consistency.md deleted file mode 100644 index 16d60de2395..00000000000 --- a/src/current/v1.0/strong-consistency.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Strong Consistency -summary: CockroachDB implements consistent replication via majority consensus between replicas. -toc: false ---- - -CockroachDB replicates your data multiple times and guarantees consistency between replicas. - -Key properties: - -- CockroachDB guarantees serializable SQL transactions - [as long as system clocks are synchronized with NTP](https://www.cockroachlabs.com/blog/living-without-atomic-clocks/) -- No downtime for server restarts, machine failures, or datacenter outages -- Local or wide-area replication with no stale reads on failover -- Employs Raft, a popular successor to Paxos - -How does this work? - -- Stored data is versioned with MVCC, so reads simply limit - their scope to the data visible at the time the read transaction started. - -- Writes are serviced using the - [Raft consensus algorithm](https://raft.github.io/), a popular - alternative to - Paxos. - A consensus algorithm guarantees that any majority of replicas - together always agree on whether an update was committed - successfully. Updates (writes) must reach a majority of replicas (2 - out of 3 by default) before they are considered committed. - - To ensure that a write transaction does not interfere with - read transactions that start after it, CockroachDB also uses - a [timestamp cache](https://www.cockroachlabs.com/blog/serializable-lockless-distributed-isolation-cockroachdb/) - which remembers when data was last read by ongoing transactions. - - This ensures that clients always observe serializable consistency - with regards to other concurrent transactions. - -Strong consistency in CockroachDB - -## See Also - -- [Serializable, Lockless, Distributed: Isolation in CockroachDB](https://www.cockroachlabs.com/blog/serializable-lockless-distributed-isolation-cockroachdb/) -- [Consensus, Made Thrive](https://www.cockroachlabs.com/blog/consensus-made-thrive/) -- [Trust, But Verify: How CockroachDB Checks Replication](https://www.cockroachlabs.com/blog/trust-but-verify-cockroachdb-checks-replication/) -- [Living Without Atomic Clocks](https://www.cockroachlabs.com/blog/living-without-atomic-clocks/) -- [The CockroachDB Architecture Document](https://github.com/cockroachdb/cockroach/blob/master/docs/design.md) diff --git a/src/current/v1.0/support-resources.md b/src/current/v1.0/support-resources.md deleted file mode 100644 index 0862de42db6..00000000000 --- a/src/current/v1.0/support-resources.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Support Resources -summary: There are various ways to reach out for support from Cockroach Labs and our community. 
-toc: false ---- - -If you're having an issue with CockroachDB, you can reach out for support from Cockroach Labs and our community: - -- [Troubleshooting documentation](troubleshooting-overview.html) -- [CockroachDB Community Forum](https://forum.cockroachlabs.com) -- [CockroachDB Community Slack](https://cockroachdb.slack.com) -- [StackOverflow](http://stackoverflow.com/questions/tagged/cockroachdb) -- [File a GitHub issue](file-an-issue.html) -- [CockroachDB Support Portal](https://support.cockroachlabs.com) - -We also rely on contributions from users like you. If you know how to help users who might be struggling with a problem, we hope you will! diff --git a/src/current/v1.0/table-expressions.md b/src/current/v1.0/table-expressions.md deleted file mode 100644 index 1a1b80f6b4d..00000000000 --- a/src/current/v1.0/table-expressions.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Table Expressions -summary: Table expressions define a data source in SELECT and INSERT statements. -toc: true ---- - -Table expressions define a data source in the `FROM` clause of -[`SELECT`](select.html) and [`INSERT`](insert.html) statements. - - -## Introduction - -Table expressions are used prominently in the `SELECT` clause: - -~~~sql -> SELECT ... FROM
    ,
    , ... -> INSERT INTO ... SELECT ... FROM
    ,
    , ... -~~~ - -CockroachDB recognizes the following table expressions: - -- a [table or view name](#table-or-view-names); -- a [table generator function](#table-generator-functions); -- a `SELECT` or `VALUES` clause, as [a sub-query](#subqueries-as-table-expressions); -- an [aliased table expression](#aliased-table-expressions), using an `AS` clause; -- an explicit [`JOIN` expression](#join-expressions); -- another table expression [annoted with `WITH ORDINALITY`](#ordinality-annotation); or -- another table expression between parentheses. - -The following sections provide details on each of these options. - -In addition to this, the `FROM` clause itself accepts more than one -consecutive table expressions at the top level, separated by -commas. This is a shorthand notation for `CROSS JOIN`, documented in -the `JOIN` syntax below. - -## Table or View Names - -Syntax: - -~~~ -identifier -identifier.identifier -~~~ - -A single SQL identifier in a table expression context designates -the contents of the table or [view](views.html) with that name -in the current database, as configured by [`SET DATABASE`](set-vars.html). - -If the name is prefixed by another identifier and a period, the table -or view is searched in the database with that name. See the section on -[name resolution](sql-name-resolution.html) for more details. - -For example: - -~~~sql -> SELECT * FROM users -- uses table `users` in the current database; -> SELECT * FROM mydb.users -- uses table `users` in database `mydb`; -~~~ - -## Table Generator Functions - -Syntax: - -~~~ -name ( arguments... ) -~~~ - -The name of a table generator function, followed by an opening -parenthesis, followed by zero or more expression arguments, followed -by a closing parenthesis. - -This designates a transient data source produced by the designated -function. - -Currently CockroachDB only supports the generator function -`pg_catalog.generate_series()`, for compatibility with -[the PostgreSQL set-generating function of the same name](https://www.postgresql.org/docs/9.6/static/functions-srf.html). - -For example: - -~~~sql -> SELECT * FROM generate_series(1, 3) -~~~ -~~~ -+-----------------+ -| generate_series | -+-----------------+ -| 1 | -| 2 | -| 3 | -+-----------------+ -~~~ - -## Subqueries as Table Expressions - -Syntax: - -~~~ -( ... subquery ... ) -~~~ - -The subquery can be expressed either as a `SELECT` or `VALUES` clause. -The parentheses around the subquery are mandatory. - -For example: - -~~~sql -> SELECT * FROM (VALUES(1), (2), (3)); -> SELECT c+2 FROM (SELECT COUNT(*) AS c FROM users); -~~~ - -## Aliased Table Expressions - -Syntax: - -~~~ -
    AS -
    AS (, , ...) -~~~ - -In the first form, the table expression is equivalent to its left operand -with a new name for the entire table, and where columns retain their original name. - -In the second form, the columns are also renamed. - -For example: - -~~~sql -> SELECT c.x FROM (SELECT COUNT(*) AS x FROM users) AS c; -> SELECT c.x FROM (SELECT COUNT(*) FROM users) AS c(x); -~~~ - -## Join Expressions - -Syntax: - -~~~ shell -# Inner joins: -
    [ INNER ] JOIN
    ON -
    [ INNER ] JOIN
    USING(, , ...) -
    NATURAL [ INNER ] JOIN
    -
    CROSS JOIN
    - -# Left outer joins: -
    LEFT [ OUTER ] JOIN
    ON -
    LEFT [ OUTER ] JOIN
    USING(, , ...) -
    NATURAL LEFT [ OUTER ] JOIN
    - -# Right outer joins: -
    RIGHT [ OUTER ] JOIN
    ON -
    RIGHT [ OUTER ] JOIN
    USING(, , ...) -
    NATURAL RIGHT [ OUTER ] JOIN
    -~~~ - -These expressions designate the -[SQL join operation](https://en.wikipedia.org/wiki/Join_(SQL)) on the -two operand table expressions. - -Currently works only with small data sets; find more info in our [blog post](https://www.cockroachlabs.com/blog/cockroachdbs-first-join/). - -## Ordinality Annotation - -Syntax: - -~~~ -
    WITH ORDINALITY -~~~ - -Designates a data source equivalent to the table expression operand with -an extra "Ordinality" column that enumerates every row in the data source. - -For example: - -~~~sql -> SELECT * FROM (VALUES('a'),('b'),('c')); -~~~ -~~~ -+---------+ -| column1 | -+---------+ -| a | -| b | -| c | -+---------+ -~~~ - -~~~sql -> SELECT * FROM (VALUES ('a'), ('b'), ('c')) WITH ORDINALITY; -~~~ -~~~ -+---------+------------+ -| column1 | ordinality | -+---------+------------+ -| a | 1 | -| b | 2 | -| c | 3 | -+---------+------------+ -~~~ - -{{site.data.alerts.callout_info}} -WITH ORDINALITY necessarily prevents some optimizations of the -surrounding query. Use it sparingly if performance is a concern, and -always check the output of EXPLAIN in case of doubt. -{{site.data.alerts.end}} - -## See Also - -- [Constants](sql-constants.html) -- [Value Expressions](sql-expressions.html) -- [Data Types](data-types.html) diff --git a/src/current/v1.0/timestamp.md b/src/current/v1.0/timestamp.md deleted file mode 100644 index ea8d98c638a..00000000000 --- a/src/current/v1.0/timestamp.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: TIMESTAMP / TIMESTAMPTZ -summary: The TIMESTAMP and TIMESTAMPTZ data types stores a date and time pair in UTC. -toc: true ---- - -The `TIMESTAMP` and `TIMESTAMPTZ` [data types](data-types.html) stores a date and time pair in UTC. - - -## Time Zone Details - -`TIMESTAMP` has two variants: - -- `TIMESTAMP WITH TIME ZONE` converts `TIMESTAMP` values from UTC to the client's session time zone (unless another time zone is specified for the value). However, it is conceptually important to note that `TIMESTAMP WITH TIME ZONE` *does not* store any time zone data. - - {{site.data.alerts.callout_info}}The default session time zone is UTC, which means that by default `TIMESTAMP WITH TIME ZONE` values display in UTC.{{site.data.alerts.end}} - -- `TIMESTAMP WITHOUT TIME ZONE` presents all `TIMESTAMP` values in UTC. - -The difference between these two types is that `TIMESTAMP WITH TIME ZONE` uses the client's session time zone, while the other simply does not. This behavior extends to functions like `now()` and `extract()` on `TIMESTAMP WITH TIME ZONE` values. - -### Best Practices - -We recommend always using the `...WITH TIME ZONE` variant because the `...WITHOUT TIME ZONE` variant can sometimes lead to unexpected behaviors when it ignores a session offset. - -However, we also recommend you avoid setting a session time for your database. Instead, convert values from the default time zone (UTC) on the client side. - -## Aliases - -In CockroachDB, the following are aliases: - -- `TIMESTAMP`, `TIMESTAMP WITHOUT TIME ZONE` -- `TIMESTAMPTZ`, `TIMESTAMP WITH TIME ZONE` - -## Syntax - -A constant value of type `TIMESTAMP`/`TIMESTAMPTZ` can be expressed using an -[interpreted literal](sql-constants.html#interpreted-literals), or a -string literal -[annotated with](sql-expressions.html#explicitly-typed-expressions) -type `TIMESTAMP`/`TIMESTAMPTZ` or -[coerced to](sql-expressions.html#explicit-type-coercions) type -`TIMESTAMP`/`TIMESTAMPTZ`. 
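    For example, a minimal sketch of the annotated and coerced forms (the timestamp value here is arbitrary):

    ~~~ sql
    > SELECT TIMESTAMP '2016-01-25 10:10:10';     -- string literal annotated with the TIMESTAMP type
    > SELECT '2016-01-25 10:10:10'::TIMESTAMPTZ;  -- string literal coerced to TIMESTAMPTZ with a cast
    ~~~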
- -`TIMESTAMP` constants can be expressed using the -following string literal formats: - -Format | Example --------|-------- -Date only | `TIMESTAMP '2016-01-25'` -Date and Time | `TIMESTAMP '2016-01-25 10:10:10.555555'` -ISO 8601 | `TIMESTAMP '2016-01-25T10:10:10.555555'` - -To express a `TIMESTAMPTZ` value (with time zone offset from UTC), use -the following format: `TIMESTAMPTZ '2016-01-25 10:10:10.555555-05:00'` - -When it is unambiguous, a simple unannotated string literal can also -be automatically interpreted as type `TIMESTAMP` or `TIMESTAMPTZ`. - -Note that the fractional portion is optional and is rounded to -microseconds (6 digits after decimal) for compatibility with the -PostgreSQL wire protocol. - -## Size - -A `TIMESTAMP` column supports values up to 12 bytes in width, but the total storage size is likely to be larger due to CockroachDB metadata. - -## Examples - -~~~ sql -> CREATE TABLE timestamps (a INT PRIMARY KEY, b TIMESTAMPTZ); - -> SHOW COLUMNS FROM timestamps; -~~~ -~~~ -+-------+--------------------------+-------+---------+ -| Field | Type | Null | Default | -+-------+--------------------------+-------+---------+ -| a | INT | false | NULL | -| b | TIMESTAMP WITH TIME ZONE | true | NULL | -+-------+--------------------------+-------+---------+ -(2 rows) -~~~ -~~~ sql -> INSERT INTO timestamps VALUES (1, TIMESTAMPTZ '2016-03-26 10:10:10-05:00'), (2, TIMESTAMPTZ '2016-03-26'); - -> SELECT * FROM timestamps; -~~~ -~~~ -+---+---------------------------+ -| a | b | -+---+---------------------------+ -| 1 | 2016-03-26 15:10:10+00:00 | -| 2 | 2016-03-26 00:00:00+00:00 | -+---+---------------------------+ -# Note that the first timestamp is UTC-05:00, which is the equivalent of EST. -~~~ - -## Supported Casting & Conversion - -`TIMESTAMP` values can be [cast](data-types.html#data-type-conversions-casts) to any of the following data types: - -Type | Details ------|-------- -`INT` | Converts to number of seconds since the Unix epoch (Jan. 1, 1970). This is a CockroachDB experimental feature which may be changed without notice. -`SERIAL` | Converts to number of seconds since the Unix epoch (Jan. 1, 1970). This is a CockroachDB experimental feature which may be changed without notice. -`DECIMAL` | Converts to number of seconds since the Unix epoch (Jan. 1, 1970). This is a CockroachDB experimental feature which may be changed without notice. -`FLOAT` | Converts to number of seconds since the Unix epoch (Jan. 1, 1970). This is a CockroachDB experimental feature which may be changed without notice. -`DATE` | –– -`STRING` | –– - -{{site.data.alerts.callout_info}}Because the SERIAL data type represents values automatically generated by CockroachDB to uniquely identify rows, you cannot meaningfully cast other data types as SERIAL values.{{site.data.alerts.end}} - -## See Also - -[Data Types](data-types.html) diff --git a/src/current/v1.0/transactions.md b/src/current/v1.0/transactions.md deleted file mode 100644 index 3834d78275c..00000000000 --- a/src/current/v1.0/transactions.md +++ /dev/null @@ -1,259 +0,0 @@ ---- -title: Transactions -summary: CockroachDB supports bundling multiple SQL statements into a single all-or-nothing transaction. -toc: true ---- - -CockroachDB supports bundling multiple SQL statements into a single all-or-nothing transaction. Each transaction guarantees [ACID semantics](https://en.wikipedia.org/wiki/ACID) spanning arbitrary tables and rows, even when data is distributed. 
If a transaction succeeds, all mutations are applied together with virtual simultaneity. If any part of a transaction fails, the entire transaction is aborted, and the database is left unchanged. CockroachDB guarantees that while a transaction is pending, it is isolated from other concurrent transactions. - -{{site.data.alerts.callout_info}}For a detailed discussion of CockroachDB transaction semantics, see How CockroachDB Does Distributed Atomic Transactions and Serializable, Lockless, Distributed: Isolation in CockroachDB. Note that the explanation of the transaction model described in this blog post is slightly out of date. See the Transaction Retries section for more details.{{site.data.alerts.end}} - - -## SQL Statements - -Each of the following SQL statements control transactions in some way. - -| Statement | Function | -|-----------|----------| -| [`BEGIN`](begin-transaction.html) | Initiate a transaction, as well as control its [priority](#transaction-priorities) and [isolation level](#isolation-levels). | -| [`SET TRANSACTION`](set-transaction.html) | Control a transaction's [priority](#transaction-priorities) and [isolation level](#isolation-levels). | -| [`SAVEPOINT cockroach_restart`](savepoint.html) | Declare the transaction as [retryable](#client-side-transaction-retries). This lets you retry the transaction if it doesn't succeed because a higher priority transaction concurrently or recently accessed the same values. | -| [`RELEASE SAVEPOINT cockroach_restart`](release-savepoint.html) | Commit a [retryable transaction](#client-side-transaction-retries). | -| [`COMMIT`](commit-transaction.html) | Commit a non-retryable transaction or clear the connection after committing a retryable transaction. | -| [`ROLLBACK TO SAVEPOINT cockroach_restart`](rollback-transaction.html) | Handle [retryable errors](#error-handling) by rolling back a transaction's changes and increasing its priority. | -| [`ROLLBACK`](rollback-transaction.html) | Abort a transaction and roll the database back to its state before the transaction began. | -| [`SHOW`](show-vars.html) | Display the current transaction settings. | - -## Syntax - -In CockroachDB, a transaction is set up by surrounding SQL statements with the [`BEGIN`](begin-transaction.html) and [`COMMIT`](commit-transaction.html) statements. - -To use [client-side transaction retries](#client-side-transaction-retries), you should also include the `SAVEPOINT cockroach_restart`, `ROLLBACK TO SAVEPOINT cockroach_restart` and `RELEASE SAVEPOINT` statements. - -~~~ sql -> BEGIN; - -> SAVEPOINT cockroach_restart; - - - -> RELEASE SAVEPOINT cockroach_restart; - -> COMMIT; -~~~ - -At any time before it's committed, you can abort the transaction by executing the [`ROLLBACK`](rollback-transaction.html) statement. - -Clients using transactions must also include logic to handle [retries](#transaction-retries). - -## Error Handling - -To handle errors in transactions, you should check for the following types of server-side errors: - -- **Retryable Errors**: Errors with the code `40001` or string `retry transaction`, which indicate the transaction failed because another concurrent or recent transaction accessed the same values. To handle these errors, you should [retry the transaction](#client-side-transaction-retries). 
- -- **Ambiguous Errors**: Errors with the code `40003` that are returned in response to `RELEASE SAVEPOINT` (or `COMMIT` when not using `SAVEPOINT`), which indicate that the state of the transaction is ambiguous, i.e., you cannot assume it either committed or failed. How you handle these errors depends on how you want to resolve the ambiguity. - - For example, you might want to read values from the database to see if the transaction successfully wrote values before attempting to write the values again or, alternatively, you might write the data again without seeing if the first write attempt succeeded. - - Ambiguous errors are the result of inter-node communication failures which prevent a caller from knowing with certainty whether a transaction commit succeeded. Most applications will choose to simply retry the transaction. - -- **SQL Errors**: All other errors, which indicate that a statement in the transaction failed. For example, violating the Unique constraint generates an `23505` error. After encountering these errors, you can either issue a `COMMIT` or `ROLLBACK` to abort the transaction and revert the database to its state before the transaction began. - - If you want to attempt the same set of statements again, you must begin a completely new transaction. - -## Transaction Retries - -Transactions in CockroachDB lock data resources that are written during their execution. In the event that a pending write from one transaction conflicts with a write of a concurrent transaction, the concurrent transaction must wait for the earlier transaction to complete before proceeding. CockroachDB implements a distributed deadlock detection algorithm to discover dependency cycles. Deadlocks are resolved by allowing transactions with higher priority to abort their dependencies. Transactions which are aborted to avoid deadlock must be retried. - -Transactions executed with the serializable isolation level may also require retries if they experience read/write contention with other concurrent transactions. Note that these types of transaction retries do not occur for transactions executing with the snapshot isolation level. Consider using snapshot isolation if your use case has high contention and your clients are retrying frequently. See [Isolation Levels](#isolation-levels) for more details. - -There are two cases for handling transaction retries: - -- [Automatic retries](#automatic-retries), which CockroachDB processes for you. -- [Client-side intervention](#client-side-intervention), which your application must handle. - -### Automatic Retries - -CockroachDB automatically retries any of the following types of transactions: - -- Individual statements (which are treated as implicit transactions), such as: - - ~~~ sql - > DELETE FROM customers WHERE id = 1; - ~~~ - -- Transactions sent from the client as a single batch. Batching is controlled by your driver or client's behavior, but means that CockroachDB receives all of the statements as a single unit, instead of a number of requests. - - From the perspective of CockroachDB, a transaction sent as a batch looks like this: - - ~~~ sql - > BEGIN; DELETE FROM customers WHERE id = 1; DELETE orders WHERE customer = 1; COMMIT; - ~~~ - - However, in your application's code, batched transactions are often just multiple statements sent at once. 
For example, in Go, this transaction would be sent as a single batch (and automatically retried): - - ~~~ go - db.Exec( - "BEGIN; - - DELETE FROM customers WHERE id = 1; - - DELETE orders WHERE customer = 1; - - COMMIT;" - ) - ~~~ - -In these cases, CockroachDB infers there is nothing conditional about these values, so it can continue to retry the transaction with the same values it originally received. - -However, if the transaction relies on conditional logic, you should instead write your transactions to use [client-side intervention](#client-side-intervention). This provides an opportunity for the client to check the transaction's conditions before deciding whether or not to retry the transaction, as well as update any values. - -### Client-Side Intervention - -Your application should include client-side retry handling when the statements are sent individually, such as: - -~~~ sql -> BEGIN; - -> UPDATE products SET inventory = 0 WHERE sku = '8675309'; - -> INSERT INTO orders (customer, status) VALUES (1, 'new'); - -> COMMIT; -~~~ - -To indicate a transaction must be retried, CockroachDB surfaces an error with the code `40001` and an error message that begins with the string `retry transaction`. - -To handle these types of errors you have two options: - -- *Recommended*: Use the `SAVEPOINT cockroach_restart` functions to create retryable transactions. Retryable transactions can improve performance because their priority's increased each time they are retried, making them more likely to succeed the longer they're in your system. - - For more information, see [Client-Side Transaction Retries](#client-side-transaction-retries). - -- Abort the transaction using `ROLLBACK`, and then reissue all of the statements in the transaction. This does *not* automatically increase the transaction's priority, so it's possible in high-contention workloads for transactions to take an incredibly long time to succeed. - -#### Client-Side Transaction Retries - -To improve the performance of transactions that fail due to contention, CockroachDB includes a set of statements that let you retry those transactions. Retrying transactions has the benefit of increasing their priority each time they're retried, increasing their likelihood to succeed. - -Retried transactions are also issued at a later timestamp, so the transaction now operates on a later snapshot of the database, so the reads might return updated data. - -Implementing client-side retries requires three statements: - -- [`SAVEPOINT cockroach_restart`](savepoint.html) declares the client's intent to retry the transaction if there are contention errors. It must be executed after `BEGIN` but before the first statement that manipulates a database. - -- [`ROLLBACK TO SAVEPOINT cockroach_restart`](rollback-transaction.html#retry-a-transaction) is used when your application detects `40001` / `retry transaction` errors. It provides you a chance to "retry" the transaction by rolling the database's state back to the beginning of the transaction and increasing the transaction's priority. - - After issuing `ROLLBACK TO SAVEPOINT cockroach_restart`, you must issue any statements you want the transaction to contain. Typically, this means recalculating values and reissuing a similar set of statements to the previous attempt. - -- [`RELEASE SAVEPOINT cockroach_restart`](release-savepoint.html) commits the transaction. 
At this point, CockroachDB checks to see if the transaction contends with others for access to the same values; the highest priority transaction succeeds, and the others return `40001` / `retry transaction` errors. - - You must also execute `COMMIT` afterward to clear the connection for the next transaction. - -You can find examples of this in the [Syntax](#syntax) section of this page or in our [Build an App with CockroachDB](build-an-app-with-cockroachdb.html) tutorials. - -{{site.data.alerts.callout_success}}If you're building an application in the following languages, we have packages to make client-side retries simpler: -{{site.data.alerts.end}} - -It's also important to note that retried transactions are restarted at a later timestamp. This means that the transaction operates on a later snapshot of the database and related reads might retrieve updated data. - -For greater detail, here's the process a retryable transaction goes through. - -1. The transaction starts with the `BEGIN` statement. - -2. The `SAVEPOINT cockroach_restart` statement declares the intention to retry the transaction in the case of contention errors. Note that CockroachDB's savepoint implementation does not support all savepoint functionality, such as nested transactions. - -3. The statements in the transaction are executed. - -4. If a statement returns a retryable error (identified via the `40001` error code or `retry transaction` string at the start of the error message), you can issue the [`ROLLBACK TO SAVEPOINT cockroach_restart`](rollback-transaction.html) statement to restart the transaction. Alternately, the original `SAVEPOINT cockroach_restart` statement can be reissued to restart the transaction. - - You must now issue the statements in the transaction again. - - In cases where you do not want the application to retry the transaction, you can simply issue `ROLLBACK` at this point. Any other statements will be rejected by the server, as is generally the case after an error has been encountered and the transaction has not been closed. - -5. Once the transaction executes all statements without encountering contention errors, execute [`RELEASE SAVEPOINT cockroach_restart`](release-savepoint.html) to commit the changes. If this succeeds, all changes made by the transaction become visible to subsequent transactions and are guaranteed to be durable if a crash occurs. - - In some cases, the `RELEASE SAVEPOINT` statement itself can fail with a retryable error, mainly because transactions in CockroachDB only realize that they need to be restarted when they attempt to commit. If this happens, the retryable error is handled as described in step 4. - -## Transaction Parameters - -Each transaction is controlled by two parameters: its priority and its -isolation level. The following two sections detail these further. - -### Transaction Priorities - -Every transaction in CockroachDB is assigned an initial **priority**. By default, that priority is `NORMAL`, but for transactions that should be given preference in high-contention scenarios, the client can set the priority within the [`BEGIN`](begin-transaction.html) statement: - -~~~ sql -> BEGIN PRIORITY ; -~~~ - -Alternately, the client can set the priority immediately after the transaction is started as follows: - -~~~ sql -> SET TRANSACTION PRIORITY ; -~~~ - -The client can also display the current priority of the transaction with [`SHOW TRANSACTION PRIORITY`](show-vars.html). 
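    For example, here is a minimal sketch that starts a high-priority transaction and checks the setting, assuming the priority keywords `LOW`, `NORMAL`, and `HIGH`:

    ~~~ sql
    > BEGIN PRIORITY HIGH;

    > SHOW TRANSACTION PRIORITY;

    > COMMIT;
    ~~~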
- -{{site.data.alerts.callout_info}}When two transactions contend for the same resources indirectly, they may create a dependency cycle leading to a deadlock situation, where both transactions are waiting on the other to finish. In these cases, CockroachDB allows the transaction with higher priority to abort the other, which must then retry. On retry, the transaction inherits the higher priority. This means that each retry makes a transaction more likely to succeed in the event it again experiences deadlock.{{site.data.alerts.end}} - -### Isolation Levels - -CockroachDB supports two transaction isolation levels: `SERIALIZABLE` and `SNAPSHOT`. By default, transactions use the `SERIALIZABLE` isolation level, but the client can explicitly set a transaction's isolation when starting the transaction: - -~~~ sql -> BEGIN ISOLATION LEVEL ; -~~~ - -Alternately, the client can set the isolation level immediately after the transaction is started: - -~~~ sql -> SET TRANSACTION ISOLATION LEVEL ; -~~~ - -The client can also display the current isolation level of the transaction with [`SHOW TRANSACTION ISOLATION LEVEL`](show-vars.html). - -{{site.data.alerts.callout_info}}For a detailed discussion of isolation in CockroachDB transactions, see Serializable, Lockless, Distributed: Isolation in CockroachDB.{{site.data.alerts.end}} - -#### Serializable Isolation - -With `SERIALIZABLE` isolation, a transaction behaves as though it has the entire database all to itself for the duration of its execution. This means that no concurrent writers can affect the transaction unless they commit before it starts, and no concurrent readers can be affected by the transaction until it has successfully committed. This is the strongest level of isolation provided by CockroachDB and it's the default. - -Unlike `SNAPSHOT`, `SERIALIZABLE` isolation permits no anomalies. However, due to CockroachDB's transaction model, `SERIALIZABLE` isolation may require more transaction restarts, especially in the presence of high contention between concurrent transactions. Consider using `SNAPSHOT` isolation for high contention workloads. - -#### Snapshot Isolation - -With `SNAPSHOT` isolation, a transaction behaves as if it were reading the state of the database consistently at a fixed point in time. Unlike the `SERIALIZABLE` level, `SNAPSHOT` isolation permits the [write skew](https://en.wikipedia.org/wiki/Snapshot_isolation) anomaly, but in cases where write skew conditions are unlikely, this isolation level can be highly performant. - -### Comparison to ANSI SQL Isolation Levels - -CockroachDB uses slightly different isolation levels than [ANSI SQL isolation levels](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Isolation_levels). - -#### Aliases - -- `REPEATABLE READ` is an alias for `SERIALIZABLE`. -- `READ UNCOMMITTED` and `READ COMMITTED` are aliases for `SNAPSHOT`. - -{{site.data.alerts.callout_success}}Despite similarity in names, REPEATABLE READ does not equate to SNAPSHOT in CockroachDB. We made this choice to avoid potential confusion between them and the anomalies they can introduce. REPEATABLE READ permits the phantom read anomaly, while SNAPSHOT permits the write skew anomaly.{{site.data.alerts.end}} - -#### Comparison - -- The CockroachDB `SERIALIZABLE` level is stronger than the ANSI SQL `REPEATABLE READ` level and equivalent to the ANSI SQL `SERIALIZABLE` level. -- The CockroachDB `SNAPSHOT` level is stronger than the ANSI SQL `READ UNCOMMITTED` and `READ COMMITTED` levels. 
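    As a sketch of how the aliases described above behave, a client can request an ANSI level by name and CockroachDB maps it onto one of its two levels:

    ~~~ sql
    > BEGIN;

    > SET TRANSACTION ISOLATION LEVEL READ COMMITTED;

    > SHOW TRANSACTION ISOLATION LEVEL;  -- expected to report SNAPSHOT, per the alias mapping above

    > COMMIT;
    ~~~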
- -For more information about the relationship between these levels, see [A Critique of ANSI SQL Isolation Levels](https://arxiv.org/ftp/cs/papers/0701/0701157.pdf). - -## See Also - -- [`BEGIN`](begin-transaction.html) -- [`COMMIT`](commit-transaction.html) -- [`ROLLBACK`](rollback-transaction.html) -- [`SAVEPOINT`](savepoint.html) -- [`RELEASE SAVEPOINT`](release-savepoint.html) -- [`SHOW`](show-vars.html) -- [Retryable function code samples](build-an-app-with-cockroachdb.html) diff --git a/src/current/v1.0/troubleshoot.md b/src/current/v1.0/troubleshoot.md deleted file mode 100644 index 001199f09bc..00000000000 --- a/src/current/v1.0/troubleshoot.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting issues with CockroachDB. -toc: true ---- - -## General Troubleshooting - -When you run into a problem, the best place to start is having CockroachDB logging its output to standard error (instead of log files in the storage directory): - -```shell -$ cockroach start --logtostderr -``` - -You can also have the errors logged in addition to outputting to standard error: - -```shell -$ cockroach start --logtostderr 2>&1 | tee error.log -``` - -## Starting Clusters & Nodes - -### Node Will Not Join Cluster - -**Description**: You specified the `--join` flag, but receive one of the following messages: - -~~~ -not connected to cluster; use --join to specify a connected node -~~~ -~~~ -node 1 belongs to cluster {"cluster hash"} but is attempting to connect to a gossip network for cluster {"another cluster hash"} -~~~ - -**Solution**: Disassociate the node from the existing directory where you've stored CockroachDB data. For example, you can do either of the following: - -- Choose a different directory to store the CockroachDB data: - - ~~~ shell - # Store this node's data in - $ cockroach start --store= --join=:26257 - ~~~ - -- Remove the existing directory and start a node joining the cluster again. - - ~~~ shell - # Remove the directory - $ rm -r cockroach-data/ - - # Start a node joining the cluster - $ cockroach start --join=:26257 - ~~~ - -**Explanation**: When you start a node, the directory you choose to store the data in also identifies the cluster the data came from. This causes conflicts when you've already started a node on the server, have quit `cockroach`, and then try to start a "new" node to a different cluster. Because the existing directory's cluster ID doesn't match the new cluster ID, the node cannot join. - -## Replication - -### Replication Error in a Single-Node Cluster - -When running a single-node CockroachDB cluster for testing, an error about replicas failing will eventually show up in the node's log files, for example: - -~~~ shell -E160407 09:53:50.337328 storage/queue.go:511 [replicate] 7 replicas failing with "0 of 1 store with an attribute matching []; likely not enough nodes in cluster" -~~~ - -This error occurs because CockroachDB expects three nodes by default. 
If you do not intend to add additional nodes, you can stop this error by updating your default zone configuration to expect only one node: - -~~~ shell -# Insecure cluster: -$ cockroach zone set .default --insecure --disable-replication - -# Secure cluster: -$ cockroach zone set .default --certs-dir= --disable-replication -~~~ - -The `--disable-replication` flag automatically reduces the zone's replica count to 1, but you can do this manually as well: - -~~~ shell -# Insecure cluster: -$ echo 'num_replicas: 1' | cockroach zone set .default --insecure -f - - -# Secure cluster: -$ echo 'num_replicas: 1' | cockroach zone set .default --certs-dir= -f - -~~~ - -See [Configure Replication Zones](configure-replication-zones.html) for more details. - -### Replication Error in a Multi-Node Cluster - -When running a multi-node CockroachDB cluster, if you see an error like the one above about replicas failing, some nodes might not be able to talk to each other. Here are some recommended actions: - -1. Check to make sure that every node but the first was started with the `--join` flag set to the hostname and port of the first node. If the flag was not set correctly for a node, shut down the node and restart it with the `--join` flag set correctly. See [Stop a Node](stop-a-node.html) and [Start a Node](start-a-node.html) for more details. - -2. If all `--join` flags were set correctly, look at the error logs for each node to determine what to do: - - `connection refused`: Check your network or firewall configuration. - - `not connected to cluster` or `node belongs to cluster...`: See [Node Will Not Join Cluster](#node-will-not-join-cluster) on this page. - -## Something Else? - -If we do not have a solution here, you can try using our other [support resources](support-resources.html), including: - -- [CockroachDB Community Forum](https://forum.cockroachlabs.com) -- [CockroachDB Community Slack](https://cockroachdb.slack.com) -- [StackOverflow](http://stackoverflow.com/questions/tagged/cockroachdb) -- [CockroachDB Support Portal](https://support.cockroachlabs.com) diff --git a/src/current/v1.0/troubleshooting-overview.md b/src/current/v1.0/troubleshooting-overview.md deleted file mode 100644 index b8d0a642f54..00000000000 --- a/src/current/v1.0/troubleshooting-overview.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Troubleshooting Overview -summary: If you're having a hard time using CockroachDB, check out this overview of our existing troubleshooting resources -toc: false ---- - -If you run into issues with CockroachDB, we have the following pages to help you resolve the issue: - -- [Common Errors](common-errors.html) helps with issues common to many scenarios. -- [Cluster & Node Setup](cluster-setup-troubleshooting.html) helps start your cluster and scale it by adding nodes. -- [Query Behavior](cluster-setup-troubleshooting.html) helps with unexpected query results. - -## Resources - -If you cannot resolve the issue easily yourself, we have the following tools to help you get unstuck: - -- [Support Resources](support-resources.html) identifies ways you can get help with troubleshooting. -- [File an Issue](file-an-issue.html) provides details about filing issues that you're unable to resolve. diff --git a/src/current/v1.0/truncate.md b/src/current/v1.0/truncate.md deleted file mode 100644 index 20a19b2ff11..00000000000 --- a/src/current/v1.0/truncate.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: TRUNCATE -summary: The TRUNCATE statement deletes all rows from specified tables. 
    -toc: true --- - -The `TRUNCATE` [statement](sql-statements.html) deletes all rows from specified tables. - -{{site.data.alerts.callout_info}}The TRUNCATE statement removes all rows from a table by dropping the table and recreating a new table with the same name. For large tables, this is much more performant than deleting each row. However, for smaller tables, it's more performant to use a DELETE statement without a WHERE clause.{{site.data.alerts.end}} - - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/truncate.html %} - -## Required Privileges - -The user must have the `DROP` [privilege](privileges.html) on the table. - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | The [`qualified_name`](sql-grammar.html#qualified_name) of the table to truncate. -`CASCADE` | Truncate all tables with [Foreign Key](foreign-key.html) dependencies on the table being truncated.

    `CASCADE` does not list dependent tables it truncates, so should be used cautiously. -`RESTRICT` | _(Default)_ Do not truncate the table if any other tables have [Foreign Key](foreign-key.html) dependencies on it. - -## Examples - -### Truncate a Table (No Foreign Key Dependencies) - -~~~ sql -> SELECT * FROM t1; -~~~ - -~~~ -+----+------+ -| id | name | -+----+------+ -| 1 | foo | -| 2 | bar | -+----+------+ -(2 rows) -~~~ - -~~~ sql -> TRUNCATE t1; - -> SELECT * FROM t1; -~~~ - -~~~ -+----+------+ -| id | name | -+----+------+ -+----+------+ -(0 rows) -~~~ - -### Truncate a Table and Dependent Tables - -In these examples, the `orders` table has a [Foreign Key](foreign-key.html) relationship to the `customers` table. Therefore, it's only possible to truncate the `customers` table while simultaneously truncating the dependent `orders` table, either using `CASCADE` or explicitly. - -#### Truncate Dependent Tables Using `CASCADE` - -{{site.data.alerts.callout_danger}}CASCADE truncates all dependent tables without listing them, which can lead to inadvertent and difficult-to-recover losses. To avoid potential harm, we recommend truncating tables explicitly in most cases. See
    Truncate Dependent Tables Explicitly for more details.{{site.data.alerts.end}} - -~~~ sql -> TRUNCATE customers; -~~~ - -~~~ -pq: "customers" is referenced by foreign key from table "orders" -~~~ - -~~~sql -> TRUNCATE customers CASCADE; - -> SELECT * FROM customers; -~~~ - -~~~ -+----+-------+ -| id | email | -+----+-------+ -+----+-------+ -(0 rows) -~~~ - -~~~ sql -> SELECT * FROM orders; -~~~ - -~~~ -+----+----------+------------+ -| id | customer | orderTotal | -+----+----------+------------+ -+----+----------+------------+ -(0 rows) -~~~ - -#### Truncate Dependent Tables Explicitly - -~~~ sql -> TRUNCATE customers, orders; - -> SELECT * FROM customers; -~~~ - -~~~ -+----+-------+ -| id | email | -+----+-------+ -+----+-------+ -(0 rows) -~~~ - -~~~ sql -> SELECT * FROM orders; -~~~ - -~~~ -+----+----------+------------+ -| id | customer | orderTotal | -+----+----------+------------+ -+----+----------+------------+ -(0 rows) -~~~ - -## See Also - -- [`DELETE](delete.html) -- [Foreign Key constraint](foreign-key.html) diff --git a/src/current/v1.0/unique.md b/src/current/v1.0/unique.md deleted file mode 100644 index 05c371025ec..00000000000 --- a/src/current/v1.0/unique.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Unique Constraint -summary: The Unique constraint specifies that each non-NULL value in the constrained column must be unique. -toc: true ---- - -The Unique [constraint](constraints.html) specifies that each non-*NULL* value in the constrained column must be unique. - - -## Details - -- You can insert *NULL* values into columns with the Unique constraint because *NULL* is the absence of a value, so it is never equal to other *NULL* values and not considered a duplicate value. This means that it's possible to insert rows that appear to be duplicates if one of the values is *NULL*. - - If you need to strictly enforce uniqueness, use the [Not Null constraint](not-null.html) in addition to the Unique constraint. You can also achieve the same behavior through the table's [Primary Key](primary-key.html). - -- Columns with the Unique constraint automatically have an [index](indexes.html) created with the name `
    <table_name>_<column_name(s)>_key` (for example, `logon_customer_id_sales_id_key` in the usage example below). To avoid having two identical indexes, you should not create indexes that exactly match the Unique constraint's columns and order.

    The Unique constraint depends on the automatically created index, so dropping the index also drops the Unique constraint. -- When using the Unique constraint on multiple columns, the collective values of the columns must be unique. This *does not* mean that each value in each column must be unique, as if you had applied the Unique constraint to each column individually. -- You can define the Unique constraint when [creating a table](#syntax), or you can add it to existing tables through [`ADD CONSTRAINT`](add-constraint.html#add-the-unique-constraint). - -## Syntax - -Unique constraints can be defined at the [table level](#table-level). However, if you only want the constraint to apply to a single column, it can be applied at the [column level](#column-level). - -### Column Level - -{% include {{ page.version.version }}/sql/diagrams/unique_column_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_name` | The name of the constrained column. | -| `column_type` | The constrained column's [data type](data-types.html). | -| `column_constraints` | Any other column-level [constraints](constraints.html) you want to apply to this column. | -| `column_def` | Definitions for any other columns in the table. | -| `table_constraints` | Any table-level [constraints](constraints.html) you want to apply. | - -**Example** - -~~~ sql -> CREATE TABLE warehouses ( - warehouse_id INT PRIMARY KEY NOT NULL, - warehouse_name STRING(35) UNIQUE, - location_id INT - ); -~~~ - -### Table Level - -{% include {{ page.version.version }}/sql/diagrams/unique_table_level.html %} - -| Parameter | Description | -|-----------|-------------| -| `table_name` | The name of the table you're creating. | -| `column_def` | Definitions for any other columns in the table. | -| `name` | The name you want to use for the constraint, which must be unique to its table and follow these [identifier rules](keywords-and-identifiers.html#identifiers). | -| `column_name` | The name of the column you want to constrain.| -| `table_constraints` | Any other table-level [constraints](constraints.html) you want to apply. | - -**Example** - -~~~ sql -> CREATE TABLE logon ( - login_id INT PRIMARY KEY, - customer_id INT, - logon_date TIMESTAMP, - UNIQUE (customer_id, logon_date) - ); -~~~ - -## Usage Example - -~~~ sql -> CREATE TABLE IF NOT EXISTS logon ( - login_id INT PRIMARY KEY, - customer_id INT NOT NULL, - sales_id INT, - UNIQUE (customer_id, sales_id) - ); - -> INSERT INTO logon (login_id, customer_id, sales_id) VALUES (1, 2, 1); - -> INSERT INTO logon (login_id, customer_id, sales_id) VALUES (2, 2, 1); -~~~ -~~~ -duplicate key value (customer_id,sales_id)=(2,1) violates unique constraint "logon_customer_id_sales_id_key" -~~~ - -As mentioned in the [details](#details) above, it is possible when using the Unique constraint alone to insert *NULL* values in a way that causes rows to appear to have rows with duplicate values. 
- -~~~ sql -> INSERT INTO logon (login_id, customer_id, sales_id) VALUES (3, 2, NULL); - -> INSERT INTO logon (login_id, customer_id, sales_id) VALUES (4, 2, NULL); - -> SELECT customer_id, sales_id FROM logon; -~~~ -~~~ -+-------------+----------+ -| customer_id | sales_id | -+-------------+----------+ -| 2 | 1 | -| 2 | NULL | -| 2 | NULL | -+-------------+----------+ -~~~ - -## See Also - -- [Constraints](constraints.html) -- [`DROP CONSTRAINT`](drop-constraint.html) -- [Check constraint](check.html) -- [Default Value constraint](default-value.html) -- [Foreign Key constraint](foreign-key.html) -- [Not Null constraint](not-null.html) -- [Primary Key constraint](primary-key.html) -- [`SHOW CONSTRAINTS`](show-constraints.html) diff --git a/src/current/v1.0/update.md b/src/current/v1.0/update.md deleted file mode 100644 index eed873dbb66..00000000000 --- a/src/current/v1.0/update.md +++ /dev/null @@ -1,409 +0,0 @@ ---- -title: UPDATE -summary: The UPDATE statement updates one or more rows in a table. -toc: true ---- - -The `UPDATE` [statement](sql-statements.html) updates rows in a table. - - -## Required Privileges - -The user must have the `SELECT` and `UPDATE` [privileges](privileges.html) on the table. - -## Synopsis - -
    {% include {{ page.version.version }}/sql/diagrams/update.html %}
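    -
    -The following is a minimal sketch, not part of the original synopsis, showing how the clauses described under Parameters below fit together; it assumes the `accounts` table used in the examples further down:
    -
    -~~~ sql
    -> UPDATE accounts SET balance = 0.0 WHERE id = 1 RETURNING id, balance;
    -
    -> UPDATE accounts SET balance = 0.0 WHERE id = 1 RETURNING NOTHING;
    -~~~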
    - -## Parameters - -Parameter | Description -----------|------------ -`table_name` | The name of the table that contains the rows you want to update. -`AS name` | An alias for the table name. When an alias is provided, it completely hides the actual table name. -`column_name` | The name of the column whose values you want to update. -`a_expr` | The new value you want to use, the [aggregate function](functions-and-operators.html#aggregate-functions) you want to perform, or the [value expression](sql-expressions.html) you want to use. -`DEFAULT` | To fill columns with their [default values](default-value.html), use `DEFAULT VALUES` in place of `a_expr`. To fill a specific column with its default value, leave the value out of the `a_expr` or use `DEFAULT` at the appropriate position. -`column_name_list` | A comma-separated list of column names, in parentheses. -`select_with_parens` | A comma-separated list of values or [value expressions](sql-expressions.html), in parentheses. To update values of multiple rows, use a comma-separated list of parentheses.

    Each value must match the [data type](data-types.html) of its column. Also, if column names are listed (`column_name_list`), values must be in corresponding order; otherwise, they must follow the declared order of the columns in the table. -`WHERE a_expr` | `a_expr` must be an expression that returns Boolean values using columns (e.g., `<column> = <value>`). Update rows that return `TRUE`.

    __Without a `WHERE` clause in your statement, `UPDATE` updates all rows in the table.__| -`RETURNING target_list` | Return values based on rows updated, where `target_list` can be specific column names from the table, `*` for all columns, or a computation on specific columns.

    To return nothing in the response, not even the number of rows updated, use `RETURNING NOTHING`. - -## Examples - -### Update a Single Column in a Single Row - -~~~ sql -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+----------+ -| id | balance | customer | -+----+----------+----------+ -| 1 | 10000.50 | Ilya | -| 2 | 4000.0 | Julian | -| 3 | 8700.0 | Dario | -| 4 | 3400.0 | Nitin | -+----+----------+----------+ -(4 rows) -~~~ - -~~~ sql -> UPDATE accounts SET balance = 5000.0 WHERE id = 2; - -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+----------+ -| id | balance | customer | -+----+----------+----------+ -| 1 | 10000.50 | Ilya | -| 2 | 5000.0 | Julian | -| 3 | 8700.0 | Dario | -| 4 | 3400.0 | Nitin | -+----+----------+----------+ -(4 rows) -~~~ - -### Update Multiple Columns in a Single Row - -~~~ sql -> UPDATE accounts SET (balance, customer) = (9000.0, 'Kelly') WHERE id = 2; - -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+----------+ -| id | balance | customer | -+----+----------+----------+ -| 1 | 10000.50 | Ilya | -| 2 | 9000.0 | Kelly | -| 3 | 8700.0 | Dario | -| 4 | 3400.0 | Nitin | -+----+----------+----------+ -(4 rows) -~~~ - -~~~ sql -> UPDATE accounts SET balance = 6300.0, customer = 'Stanley' WHERE id = 3; - -> SELECT * FROM accounts; -~~~ - -~~~ -+----+----------+----------+ -| id | balance | customer | -+----+----------+----------+ -| 1 | 10000.50 | Ilya | -| 2 | 9000.0 | Kelly | -| 3 | 6300.0 | Stanley | -| 4 | 3400.0 | Nitin | -+----+----------+----------+ -(4 rows) -~~~ - -### Update Using `SELECT` Statement -~~~ sql -> UPDATE accounts SET (balance, customer) = - (SELECT balance, customer FROM accounts WHERE id = 2) - WHERE id = 4; - -> SELECT * FROM accounts; -~~~ - -~~~ -+----+----------+----------+ -| id | balance | customer | -+----+----------+----------+ -| 1 | 10000.50 | Ilya | -| 2 | 9000.0 | Kelly | -| 3 | 6300.0 | Stanley | -| 4 | 9000.0 | Kelly | -+----+----------+----------+ -(4 rows) -~~~ - -### Update with Default Values - -~~~ sql -> UPDATE accounts SET balance = DEFAULT where customer = 'Stanley'; - -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+----------+ -| id | balance | customer | -+----+----------+----------+ -| 1 | 10000.50 | Ilya | -| 2 | 9000.0 | Kelly | -| 3 | NULL | Stanley | -| 4 | 9000.0 | Kelly | -+----+----------+----------+ -(4 rows) -~~~ - -### Update All Rows - -{{site.data.alerts.callout_danger}}If you do not use the WHERE clause to specify the rows to be updated, the values for all rows will be updated.{{site.data.alerts.end}} - -~~~ sql -> UPDATE accounts SET balance = 5000.0; - -> SELECT * FROM accounts; -~~~ -~~~ -+----+---------+----------+ -| id | balance | customer | -+----+---------+----------+ -| 1 | 5000.0 | Ilya | -| 2 | 5000.0 | Kelly | -| 3 | 5000.0 | Stanley | -| 4 | 5000.0 | Kelly | -+----+---------+----------+ -(4 rows) -~~~ - -### Update and Return Values - -In this example, the `RETURNING` clause returns the `id` value of the row updated. The language-specific versions assume that you have installed the relevant [client drivers](install-client-drivers.html). - -{{site.data.alerts.callout_success}}This use of RETURNING mirrors the behavior of MySQL's last_insert_id() function.{{site.data.alerts.end}} - -{{site.data.alerts.callout_info}}When a driver provides a query() method for statements that return results and an exec() method for statements that do not (e.g., Go), it's likely necessary to use the query() method for UPDATE statements with RETURNING.{{site.data.alerts.end}} - -
    - - - - - -
    - -
    -

    - -~~~ sql -> UPDATE accounts SET balance = DEFAULT WHERE id = 1 RETURNING id; -~~~ - -~~~ -+----+ -| id | -+----+ -| 1 | -+----+ -(1 row) -~~~ - -
    - -
    -

    - -~~~ python -# Import the driver. -import psycopg2 - -# Connect to the "bank" database. -conn = psycopg2.connect( - database='bank', - user='root', - host='localhost', - port=26257 -) - -# Make each statement commit immediately. -conn.set_session(autocommit=True) - -# Open a cursor to perform database operations. -cur = conn.cursor() - -# Update a row in the "accounts" table -# and return the "id" value. -cur.execute( - 'UPDATE accounts SET balance = DEFAULT WHERE id = 1 RETURNING id' -) - -# Print out the returned value. -rows = cur.fetchall() -print('ID:') -for row in rows: - print([str(cell) for cell in row]) - -# Close the database connection. -cur.close() -conn.close() -~~~ - -The printed value would look like: - -~~~ -ID: -['1'] -~~~ - -
    - -
    -

    - -~~~ ruby -# Import the driver. -require 'pg' - -# Connect to the "bank" database. -conn = PG.connect( - user: 'root', - dbname: 'bank', - host: 'localhost', - port: 26257 -) - -# Update a row in the "accounts" table -# and return the "id" value. -conn.exec( - 'UPDATE accounts SET balance = DEFAULT WHERE id = 1 RETURNING id' -) do |res| - -# Print out the returned value. -puts "ID:" - res.each do |row| - puts row - end -end - -# Close communication with the database. -conn.close() -~~~ - -The printed value would look like: - -~~~ -ID: -{"id"=>"1"} -~~~ - -
    - -
    -

    - -~~~ go -package main - -import ( - "database/sql" - "fmt" - "log" - - _ "github.com/lib/pq" -) - -func main() { - //Connect to the "bank" database. - db, err := sql.Open( - "postgres", - "postgresql://root@localhost:26257/bank?sslmode=disable" - ) - if err != nil { - log.Fatal("error connecting to the database: ", err) - } - - // Update a row in the "accounts" table - // and return the "id" value. - rows, err := db.Query( - "UPDATE accounts SET balance = DEFAULT WHERE id = 1 RETURNING id", - ) - if err != nil { - log.Fatal(err) - } - - // Print out the returned value. - defer rows.Close() - fmt.Println("ID:") - for rows.Next() { - var id int - if err := rows.Scan(&id); err != nil { - log.Fatal(err) - } - fmt.Printf("%d\n", id) - } -} -~~~ - -The printed value would look like: - -~~~ -ID: -1 -~~~ - -
    - -
    -

    - -~~~ js -var async = require('async'); - -// Require the driver. -var pg = require('pg'); - -// Connect to the "bank" database. -var config = { - user: 'root', - host: 'localhost', - database: 'bank', - port: 26257 -}; - -pg.connect(config, function (err, client, done) { - // Closes communication with the database and exits. - var finish = function () { - done(); - process.exit(); - }; - - if (err) { - console.error('could not connect to cockroachdb', err); - finish(); - } - async.waterfall([ - function (next) { - // Update a row in the "accounts" table - // and return the "id" value. - client.query( - `UPDATE accounts SET balance = DEFAULT WHERE id = 1 RETURNING id`, - next - ); - } - ], - function (err, results) { - if (err) { - console.error('error updating and selecting from accounts', err); - finish(); - } - // Print out the returned value. - console.log('ID:'); - results.rows.forEach(function (row) { - console.log(row); - }); - - finish(); - }); -}); -~~~ - -The printed value would like: - -~~~ -ID: -{ id: '1' } -~~~ - -
    - - - - - diff --git a/src/current/v1.0/upgrade-cockroach-version.md b/src/current/v1.0/upgrade-cockroach-version.md deleted file mode 100644 index ef157d51a59..00000000000 --- a/src/current/v1.0/upgrade-cockroach-version.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Upgrade a Cluster's Version -summary: Learn how to upgrade your CockroachDB cluster to a new version. -toc: true -toc_not_nested: true ---- - -Because of CockroachDB's [multi-active availability](multi-active-availability.html) design, you can perform a "rolling upgrade" of your CockroachDB cluster. This means that you can upgrade nodes one at a time without interrupting the cluster's overall health and operations. - -{{site.data.alerts.callout_info}}This page shows you how to upgrade from v1.0 to a patch release in the 1.0.x series. To upgrade from v1.0.x to v1.1, see the 1.1 version of this page.{{site.data.alerts.end}} - - -## Step 1. Prepare to upgrade - -Before starting the upgrade, complete the following steps. - -1. Make sure your cluster is behind a load balancer, or your clients are configured to talk to multiple nodes. If your application communicates with a single node, stopping that node to upgrade its CockroachDB binary will cause your application to fail. - -2. Verify the cluster's overall health by running the [`cockroach node status`](view-node-details.html) command against any node in the cluster. - - In the response: - - If any nodes that should be live are not listed, identify why the nodes are offline and restart them before beginning your upgrade. - - Make sure the `build` field shows the same version of CockroachDB for all nodes. If any nodes are behind, upgrade them to the cluster's current version first, and then start this process over. - - Make sure `ranges_unavailable` and `ranges_underreplicated` show `0` for all nodes. If there are unavailable or underreplicated ranges in your cluster, performing a rolling upgrade increases the risk that ranges will lose a majority of their replicas and cause cluster unavailability. Therefore, it's important to identify and resolve the cause of range unavailability and underreplication before beginning your upgrade. - -3. Capture the cluster's current state by running the [`cockroach debug zip`](debug-zip.html) command against any node in the cluster. If the upgrade does not go according to plan, the captured details will help you and Cockroach Labs troubleshoot issues. - -4. [Back up the cluster](back-up-data.html). If the upgrade does not go according to plan, you can use the data to restore your cluster to its previous state. - -## Step 2. Perform the rolling upgrade - -For each node in your cluster, complete the following steps. - -{{site.data.alerts.callout_success}}We recommend creating scripts to perform these steps instead of performing them by hand.{{site.data.alerts.end}} - -{{site.data.alerts.callout_danger}}Upgrade only one node at a time, and wait at least one minute after a node rejoins the cluster to upgrade the next node. Simultaneously upgrading more than one node increases the risk that ranges will lose a majority of their replicas and cause cluster unavailability.{{site.data.alerts.end}} - -1. Connect to the node. - -2. Terminate the `cockroach` process. 
- - Without a process manager, use this command: - - {% include copy-clipboard.html %} - ~~~ shell - $ pkill cockroach - ~~~ - - Then verify that the process has stopped: - - {% include copy-clipboard.html %} - ~~~ shell - $ ps aux | grep cockroach - ~~~ - - Alternately, you can check the node's logs for the message `server drained and shutdown completed`. - -3. Download and install the CockroachDB binary you want to use: - -
    - - -
    -

    - -
    - {% include copy-clipboard.html %} - ~~~ shell - $ curl https://binaries.cockroachdb.com/cockroach-{{page.release_info.version}}.darwin-10.9-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ tar -xzf cockroach-{{page.release_info.version}}.darwin-10.9-amd64.tgz - ~~~ -
    - -
    - {% include copy-clipboard.html %} - ~~~ shell - $ curl https://binaries.cockroachdb.com/cockroach-{{page.release_info.version}}.linux-amd64.tgz - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ tar -xzf cockroach-{{page.release_info.version}}.linux-amd64.tgz - ~~~ -
    - -4. If you use `cockroach` in your `$PATH`, rename the outdated `cockroach` binary, and then move the new one into its place: - -
    - - -
    -

    - -
    - {% include copy-clipboard.html %} - ~~~ shell - i="$(which cockroach)"; mv "$i" "$i"_old - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ cp -i cockroach-{{page.release_info.version}}.darwin-10.9-amd64/cockroach /usr/local/bin/cockroach - ~~~ -
    - -
    - {% include copy-clipboard.html %} - ~~~ shell - i="$(which cockroach)"; mv "$i" "$i"_old - ~~~ - - {% include copy-clipboard.html %} - ~~~ shell - $ cp -i cockroach-{{page.release_info.version}}.linux-amd64/cockroach /usr/local/bin/cockroach - ~~~ -
    - -5. If you're running with a process manager, have the node rejoin the cluster by starting it. - - Without a process manager, use this command: - - {% include copy-clipboard.html %} - ~~~ shell - $ cockroach start --join=[IP address of any other node] [other flags] - ~~~ - `[other flags]` includes any flags you [use to a start node](start-a-node.html), such as `--host`. - -6. Verify the node has rejoined the cluster through its output to `stdout` or through the [admin UI](explore-the-admin-ui.html). - -7. If you use `cockroach` in your `$PATH`, you can remove the old binary: - - {% include copy-clipboard.html %} - ~~~ shell - $ rm /usr/local/bin/cockroach_old - ~~~ - - If you leave versioned binaries on your servers, you do not need to do anything. - -8. Wait at least one minute after the node has rejoined the cluster, and then repeat these steps for the next node. - -## Step 3. Monitor the upgraded cluster - -After upgrading all nodes in the cluster, monitor the cluster's stability and performance for at least one day. - -If you experience any problems, follow these steps to troubleshoot and, if necessary, downgrade the cluster: - -1. Run the [`cockroach debug zip`](debug-zip.html) command against any node in the cluster to capture your cluster's state. - -2. [Reach out for support](support-resources.html) from Cockroach Labs, sharing your debug zip. - -3. If necessary, downgrade the cluster by repeating the [rolling upgrade process](#step-2-perform-the-rolling-upgrade), but this time switching each node back to the previous version in the 1.0.x series. - -## See Also - -- [View Node Details](view-node-details.html) -- [Collect Debug Information](debug-zip.html) -- [View Version Details](view-version-details.html) -- [Release notes for our latest version](../releases/{{page.version.version}}.html) diff --git a/src/current/v1.0/upsert.md b/src/current/v1.0/upsert.md deleted file mode 100644 index 5d68b0dcb40..00000000000 --- a/src/current/v1.0/upsert.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: UPSERT -summary: The UPSERT statement inserts rows when values do not violate uniqueness constraints, and it updates rows when values do violate uniqueness constraints. -toc: true ---- - -The `UPSERT` [statement](sql-statements.html) is short-hand for [`INSERT ON CONFLICT`](insert.html). It inserts rows in cases where specified values do not violate uniqueness constraints, and it updates rows in cases where values do violate uniqueness constraints. - - -## Considerations - -- `UPSERT` considers uniqueness only for [Primary Key](primary-key.html) columns. `INSERT ON CONFLICT` is more flexible and can be used to consider uniqueness for other columns. For more details, see [How `UPSERT` Transforms into `INSERT ON CONFLICT`](#how-upsert-transforms-into-insert-on-conflict) below. - -- When inserting/updating all columns of a table, and the table has no secondary indexes, `UPSERT` will be faster than the equivalent `INSERT ON CONFLICT` statement, as it will write without first reading. This may be particularly useful if you are using a simple SQL table of two columns to [simulate direct KV access](frequently-asked-questions.html#can-i-use-cockroachdb-as-a-key-value-store). - -## Required Privileges - -The user must have the `INSERT` and `UPDATE` [privileges](privileges.html) on the table. - -## Synopsis - -{% include {{ page.version.version }}/sql/diagrams/upsert.html %} - -## Parameters - -Parameter | Description -----------|------------ -`qualified_name` | The name of the table. 
-`AS name` | An alias for the table name. When an alias is provided, it completely hides the actual table name. -`qualified_name_list` | A comma-separated list of column names, in parentheses. -`select_stmt` | A comma-separated list of column values for a single row, in parentheses. To upsert values into multiple rows, use a comma-separated list of parentheses. Alternately, you can use [`SELECT`](select.html) statements to retrieve values from other tables and upsert them.

    Each value must match the [data type](data-types.html) of its column. Also, if column names are listed (`qualified_name_list`), values must be in corresponding order; otherwise, they must follow the declared order of the columns in the table. -`DEFAULT VALUES` | To fill all columns with their [default values](default-value.html), use `DEFAULT VALUES` in place of `select_stmt`. To fill a specific column with its default value, leave the value out of the `select_stmt` or use `DEFAULT` at the appropriate position. -`RETURNING target_list` | When there are no uniqueness violations, return values based on rows inserted, where `target_list` can be specific column names from the table, `*` for all columns, or a computation on specific columns. When there are uniqueness violations, `RETURNING` is not supported.

    Within a [transaction](transactions.html), use `RETURNING NOTHING` to return nothing in the response, not even the number of rows affected. - -## How `UPSERT` Transforms into `INSERT ON CONFLICT` - -`UPSERT` considers uniqueness only for [primary key](primary-key.html) columns. For example, assuming that columns `a` and `b` are the primary key, the following `UPSERT` and `INSERT ON CONFLICT` statements are equivalent: - -~~~ sql -> UPSERT INTO t (a, b, c) VALUES (1, 2, 3); - -> INSERT INTO t (a, b, c) - VALUES (1, 2, 3) - ON CONFLICT (a, b) - DO UPDATE SET c = excluded.c; -~~~ - -`INSERT ON CONFLICT` is more flexible and can be used to consider uniqueness for columns not in the primary key. For more details, see the [Upsert that Fails (Conflict on Non-Primary Key)](#upsert-that-fails-conflict-on-non-primary-key) example below. - -## Examples - -### Upsert that Inserts a Row (No Conflict) - -In this example, the `id` column is the primary key. Because the inserted `id` value does not conflict with the `id` value of any existing row, the `UPSERT` statement inserts a new row into the table. - -~~~ sql -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.5 | -| 2 | 20000.75 | -+----+----------+ -~~~ -~~~ sql -> UPSERT INTO accounts (id, balance) VALUES (3, 6325.20); - -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.5 | -| 2 | 20000.75 | -| 3 | 6325.2 | -+----+----------+ -~~~ - -### Upsert that Updates a Row (Conflict on Primary Key) - -In this example, the `id` column is the primary key. Because the inserted `id` value is not unique, the `UPSERT` statement updates the row with the new `balance`. - -~~~ sql -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.5 | -| 2 | 20000.75 | -| 3 | 6325.2 | -+----+----------+ -~~~ -~~~ sql -> UPSERT INTO accounts (id, balance) VALUES (3, 7500.83); - -> SELECT * FROM accounts; -~~~ -~~~ -+----+----------+ -| id | balance | -+----+----------+ -| 1 | 10000.5 | -| 2 | 20000.75 | -| 3 | 7500.83 | -+----+----------+ -~~~ - -### Upsert that Fails (Conflict on Non-Primary Key) - -`UPSERT` will not update rows when the uniquness conflict is on columns not in the primary key. In this example, the `a` column is the primary key, but the `b` column also has the [Unique constraint](unique.html). Because the inserted `b` value is not unique, the `UPSERT` fails. - -~~~ sql -> SELECT * FROM unique_test; -~~~ -~~~ -+---+---+ -| a | b | -+---+---+ -| 1 | 1 | -| 2 | 2 | -| 3 | 3 | -+---+---+ -~~~ -~~~ sql -> UPSERT INTO unique_test VALUES (4, 1); -~~~ -~~~ -pq: duplicate key value (b)=(1) violates unique constraint "unique_test_b_key" -~~~ - -In such a case, you would need to use the [`INSERT ON CONFLICT`](insert.html) statement to specify the `b` column as the column with the Unique constraint. 
- -~~~ sql -> INSERT INTO unique_test VALUES (4, 1) ON CONFLICT (b) DO UPDATE SET a = excluded.a; - -> SELECT * FROM unique_test; -~~~ -~~~ -+---+---+ -| a | b | -+---+---+ -| 2 | 2 | -| 3 | 3 | -| 4 | 1 | -+---+---+ -~~~ - -## See Also - -- [`INSERT`](insert.html) -- [Other SQL Statements](sql-statements.html) diff --git a/src/current/v1.0/use-the-built-in-sql-client.md b/src/current/v1.0/use-the-built-in-sql-client.md deleted file mode 100644 index 65405bd1341..00000000000 --- a/src/current/v1.0/use-the-built-in-sql-client.md +++ /dev/null @@ -1,363 +0,0 @@ ---- -title: Use the Built-in SQL Client -summary: CockroachDB comes with a built-in client for executing SQL statements from an interactive shell or directly from the command line. -toc: true ---- - -CockroachDB comes with a built-in client for executing SQL statements from an interactive shell or directly from the command line. To use this client, run the `cockroach sql` [command](cockroach-commands.html) as described below. - -To exit the interactive shell, use `\q` or `ctrl-d`. - - -## Synopsis - -~~~ shell -# Start the interactive SQL shell: -$ cockroach sql - -# Execute SQL from the command line: -$ cockroach sql --execute=";" --execute="" -$ echo ";" | cockroach sql -$ cockroach sql < file-containing-statements.sql - -# View help: -$ cockroach sql --help -~~~ - -## Flags - -The `sql` command supports the following [general-use](#general) and [logging](#logging) flags. - -### General - -- To start an interactive SQL shell, run `cockroach sql` with all appropriate connection flags or use just the `--url` flag, which includes connection details. -- To execute SQL statements from the command line, use the `--execute` flag. - -Flag | Description ------|------------ -`--certs-dir` | The path to the [certificate directory](create-security-certificates.html). The directory must contain valid certificates if running in secure mode.

    **Env Variable:** `COCKROACH_CERTS_DIR`
    **Default:** `${HOME}/.cockroach-certs/` -`--database`
    `-d` | The database to connect to.

    **Env Variable:** `COCKROACH_DATABASE` -`--execute`
    `-e` | Execute SQL statements directly from the command line, without opening a shell. This flag can be set multiple times, and each instance can contain one or more statements separated by semi-colons. If an error occurs in any statement, the command exits with a non-zero status code and further statements are not executed. The results of each statement are printed to the standard output (see `--pretty` for formatting options).

    For a demonstration of this and other ways to execute SQL from the command line, see the [examples](#execute-sql-statements-from-the-command-line) below. -`--host` | The server host to connect to. This can be the address of any node in the cluster.

    **Env Variable:** `COCKROACH_HOST`
    **Default:** `localhost` -`--insecure` | Run in insecure mode. If this flag is not set, the `--certs-dir` flag must point to valid certificates.

    **Env Variable:** `COCKROACH_INSECURE`
    **Default:** `false` -`--port`
    `-p` | The server port to connect to.

    **Env Variable:** `COCKROACH_PORT`
    **Default:** `26257` -`--pretty` | Format table rows printed to the standard output using ASCII art and disable escaping of special characters.

    When disabled with `--pretty=false`, or when the standard output is not a terminal, table rows are printed as tab-separated values, and special characters are escaped. This makes the output easy to parse by other programs.

    **Default:** `true` when output is a terminal, `false` otherwise -`--url` | The connection URL. If you use this flag, do not set any other connection flags.

    For insecure connections, the URL format is:
    `--url=postgresql://<user>@<host>:<port>/<database>?sslmode=disable`

    For secure connections, the URL format is:
    `--url=postgresql://<user>@<host>:<port>/<database>`
    with the following parameters in the query string:
    `sslcert=<path to client certificate>`
    `sslkey=<path to client key>`
    `sslmode=verify-full`
    `sslrootcert=<path to CA certificate>`

    **Env Variable:** `COCKROACH_URL` -`--user`
    `-u` | The [user](create-and-manage-users.html) connecting to the database. The user must have [privileges](privileges.html) for any statement executed.

    **Env Variable:** `COCKROACH_USER`
    **Default:** `root` - -### Logging - -By default, the `sql` command logs errors to `stderr`. - -If you need to troubleshoot this command's behavior, you can change its [logging behavior](debug-and-error-logs.html). - -## SQL Shell Commands - -The following commands can be used within the interactive SQL shell: - -Command | Usage ---------|------ -`\q`
    `ctrl-d` | Exit the shell.

    When no text follows the prompt, `ctrl-c` exits the shell as well; otherwise, `ctrl-c` clears the line. -`\!` | Run an external command and print its results to `stdout`. See the [example](#run-external-commands-from-the-sql-shell) below. -\| | Run the output of an external command as SQL statements. See the [example](#run-external-commands-from-the-sql-shell) below. -`\set