diff --git a/doc/user/content/integrations/client-libraries/golang.md b/doc/user/content/integrations/client-libraries/golang.md
index 47ac66968a4fc..a99e6f292643f 100644
--- a/doc/user/content/integrations/client-libraries/golang.md
+++ b/doc/user/content/integrations/client-libraries/golang.md
@@ -107,9 +107,8 @@ Typically, you create sources, views, and indexes when deploying Materialize, bu
 ```go
 createSourceSQL := `
-    CREATE SOURCE IF NOT EXISTS counter
-    FROM LOAD GENERATOR COUNTER
-    (TICK INTERVAL '500ms');
+    CREATE SOURCE IF NOT EXISTS auction
+    FROM LOAD GENERATOR AUCTION FOR ALL TABLES;
 `
 
 _, err = conn.Exec(ctx, createSourceSQL)
@@ -123,9 +122,9 @@ For more information, see [`CREATE SOURCE`](/sql/create-source/).
 ```go
 createViewSQL := `
-    CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
-    SELECT sum(counter)
-    FROM counter;
+    CREATE MATERIALIZED VIEW IF NOT EXISTS amount_sum AS
+    SELECT sum(amount)
+    FROM bids;
 `
 
 _, err = conn.Exec(ctx, createViewSQL)
@@ -150,7 +149,7 @@ if err != nil {
 }
 defer tx.Rollback(ctx)
 
-_, err = tx.Exec(ctx, "DECLARE c CURSOR FOR SUBSCRIBE counter_sum")
+_, err = tx.Exec(ctx, "DECLARE c CURSOR FOR SUBSCRIBE amount_sum")
 if err != nil {
     log.Fatal(err)
     return
@@ -180,7 +179,7 @@ if err != nil {
 }
 ```
 
-The [SUBSCRIBE output format](/sql/subscribe/#output) of `subscribeResult` contains all of the columns of `counter_sum`, prepended with several additional columns that describe the nature of the update. When a row of a subscribed view is **updated,** two objects will show up in the result set:
+The [SUBSCRIBE output format](/sql/subscribe/#output) of `subscribeResult` contains all of the columns of `amount_sum`, prepended with several additional columns that describe the nature of the update. When a row of a subscribed view is **updated,** two objects will show up in the result set:
 
 ```go
 {MzTimestamp:1646868332570 MzDiff:1 row...}
@@ -194,8 +193,8 @@ An `MzDiff` value of `-1` indicates that Materialize is deleting one row with th
 To clean up the sources, views, and tables that we created, first connect to Materialize using a [PostgreSQL client](/integrations/sql-clients/) and then, run the following commands:
 
 ```mzsql
-DROP MATERIALIZED VIEW IF EXISTS counter_sum;
-DROP SOURCE IF EXISTS counter;
+DROP MATERIALIZED VIEW IF EXISTS amount_sum;
+DROP SOURCE IF EXISTS auction CASCADE;
 DROP TABLE IF EXISTS countries;
 ```
diff --git a/doc/user/content/integrations/client-libraries/java-jdbc.md b/doc/user/content/integrations/client-libraries/java-jdbc.md
index 0fa0d071ae054..001de448a3cdb 100644
--- a/doc/user/content/integrations/client-libraries/java-jdbc.md
+++ b/doc/user/content/integrations/client-libraries/java-jdbc.md
@@ -275,7 +275,7 @@ public class App {
 
     public void source() {
-        String SQL = "CREATE SOURCE counter FROM LOAD GENERATOR COUNTER;";
+        String SQL = "CREATE SOURCE auction FROM LOAD GENERATOR AUCTION FOR ALL TABLES;";
 
         try (Connection conn = connect()) {
             Statement st = conn.createStatement();
@@ -301,9 +301,9 @@ For more information, see [`CREATE SOURCE`](/sql/create-source/).
 ```java
     public void view() {
-        String SQL = "CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS "
-                + "SELECT sum(counter)"
-                + "FROM counter;";
+        String SQL = "CREATE MATERIALIZED VIEW IF NOT EXISTS amount_sum AS "
+                + "SELECT sum(amount)"
+                + "FROM bids;";
 
         try (Connection conn = connect()) {
             Statement st = conn.createStatement();
@@ -358,7 +358,7 @@ public class App {
             Statement stmt = conn.createStatement();
             stmt.execute("BEGIN");
-            stmt.execute("DECLARE c CURSOR FOR SUBSCRIBE counter_sum");
+            stmt.execute("DECLARE c CURSOR FOR SUBSCRIBE amount_sum");
             while (true) {
                 ResultSet rs = stmt.executeQuery("FETCH ALL c");
                 if(rs.next()) {
@@ -394,8 +394,8 @@ A `mz_diff` value of `-1` indicates that Materialize is deleting one row with th
 To clean up the sources, views, and tables that we created, first connect to Materialize using a [PostgreSQL client](/integrations/sql-clients/) and then, run the following commands:
 
 ```mzsql
-DROP MATERIALIZED VIEW IF EXISTS counter_sum;
-DROP SOURCE IF EXISTS counter;
+DROP MATERIALIZED VIEW IF EXISTS amount_sum;
+DROP SOURCE IF EXISTS auction CASCADE;
 DROP TABLE IF EXISTS countries;
 ```
diff --git a/doc/user/content/integrations/client-libraries/node-js.md b/doc/user/content/integrations/client-libraries/node-js.md
index 15999ef8bc0aa..eef15b6224479 100644
--- a/doc/user/content/integrations/client-libraries/node-js.md
+++ b/doc/user/content/integrations/client-libraries/node-js.md
@@ -149,7 +149,7 @@ const client = new Client({
 async function main() {
   await client.connect();
   const res = await client.query(
-    `CREATE SOURCE counter FROM LOAD GENERATOR COUNTER;`
+    `CREATE SOURCE auction FROM LOAD GENERATOR AUCTION FOR ALL TABLES;`
   );
   console.log(res);
 }
@@ -176,9 +176,9 @@ const client = new Client({
 async function main() {
   await client.connect();
   const res = await client.query(
-    `CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
-     SELECT sum(counter)
-     FROM counter;`
+    `CREATE MATERIALIZED VIEW IF NOT EXISTS amount_sum AS
+     SELECT sum(amount)
+     FROM bids;`
   );
   console.log(res);
 }
@@ -209,7 +209,7 @@ async function main() {
   await client.connect();
   await client.query('BEGIN');
 
-  await client.query('DECLARE c CURSOR FOR SUBSCRIBE counter_sum WITH (SNAPSHOT = FALSE)');
+  await client.query('DECLARE c CURSOR FOR SUBSCRIBE amount_sum WITH (SNAPSHOT = FALSE)');
 
   while (true) {
     const res = await client.query('FETCH ALL c');
@@ -265,8 +265,8 @@ client.connect((err, client) => {
 To clean up the sources, views, and tables that we created, first connect to Materialize using a [PostgreSQL client](/integrations/sql-clients/) and then, run the following commands:
 
 ```mzsql
-DROP MATERIALIZED VIEW IF EXISTS counter_sum;
-DROP SOURCE IF EXISTS counter;
+DROP MATERIALIZED VIEW IF EXISTS amount_sum;
+DROP SOURCE IF EXISTS auction CASCADE;
 DROP TABLE IF EXISTS countries;
 ```
diff --git a/doc/user/content/integrations/client-libraries/php.md b/doc/user/content/integrations/client-libraries/php.md
index b1993402677be..bb61827a0e940 100644
--- a/doc/user/content/integrations/client-libraries/php.md
+++ b/doc/user/content/integrations/client-libraries/php.md
@@ -118,7 +118,7 @@ Typically, you create sources, views, and indexes when deploying Materialize, al
 // Include the Postgres connection details
 require 'connect.php';
 
-$sql = "CREATE SOURCE counter FROM LOAD GENERATOR COUNTER;";
+$sql = "CREATE SOURCE auction FROM LOAD GENERATOR AUCTION FOR ALL TABLES;";
 $statement = $connection->prepare($sql);
 $statement->execute();
@@ -139,7 +139,7 @@ For more information, see [`CREATE SOURCE`](/sql/create-source/).
 // Include the Postgres connection details
 require 'connect.php';
 
-$sql = "CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS SELECT SUM(value) FROM counter;";
+$sql = "CREATE MATERIALIZED VIEW IF NOT EXISTS amount_sum AS SELECT SUM(amount) FROM bids;";
 $statement = $connection->prepare($sql);
 $statement->execute();
@@ -166,7 +166,7 @@ require 'connect.php';
 // Begin a transaction
 $connection->beginTransaction();
 // Declare a cursor
-$statement = $connection->prepare('DECLARE c CURSOR FOR SUBSCRIBE counter_sum WITH (FETCH = true);');
+$statement = $connection->prepare('DECLARE c CURSOR FOR SUBSCRIBE amount_sum WITH (FETCH = true);');
 // Execute the statement
 $statement->execute();
@@ -214,8 +214,8 @@ An `mz_diff` value of `-1` indicates Materialize is deleting one row with the in
 To clean up the sources, views, and tables that we created, first connect to Materialize using a [PostgreSQL client](/integrations/sql-clients/) and then, run the following commands:
 
 ```mzsql
-DROP MATERIALIZED VIEW IF EXISTS counter_sum;
-DROP SOURCE IF EXISTS counter;
+DROP MATERIALIZED VIEW IF EXISTS amount_sum;
+DROP SOURCE IF EXISTS auction CASCADE;
 DROP TABLE IF EXISTS countries;
 ```
diff --git a/doc/user/content/integrations/client-libraries/python.md b/doc/user/content/integrations/client-libraries/python.md
index 6b59237009e17..0d85fb3c03caa 100644
--- a/doc/user/content/integrations/client-libraries/python.md
+++ b/doc/user/content/integrations/client-libraries/python.md
@@ -119,7 +119,7 @@ conn = psycopg2.connect(dsn)
 conn.autocommit = True
 
 with conn.cursor() as cur:
-    cur.execute("CREATE SOURCE counter FROM LOAD GENERATOR COUNTER;")
+    cur.execute("CREATE SOURCE auction FROM LOAD GENERATOR AUCTION FOR ALL TABLES;")
 
 with conn.cursor() as cur:
     cur.execute("SHOW SOURCES")
@@ -141,9 +141,9 @@ conn = psycopg2.connect(dsn)
 conn.autocommit = True
 
 with conn.cursor() as cur:
-    cur.execute("CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS " \
-                "SELECT sum(counter)" \
-                "FROM counter;")
+    cur.execute("CREATE MATERIALIZED VIEW IF NOT EXISTS amount_sum AS " \
+                "SELECT sum(amount)" \
+                "FROM bids;")
 
 with conn.cursor() as cur:
     cur.execute("SHOW VIEWS")
@@ -168,7 +168,7 @@ dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_
 conn = psycopg2.connect(dsn)
 
 with conn.cursor() as cur:
-    cur.execute("DECLARE c CURSOR FOR SUBSCRIBE counter_sum")
+    cur.execute("DECLARE c CURSOR FOR SUBSCRIBE amount_sum")
     while True:
         cur.execute("FETCH ALL c")
         for row in cur:
@@ -204,7 +204,7 @@ dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_
 conn = psycopg.connect(dsn)
 
 with conn.cursor() as cur:
-    for row in cur.stream("SUBSCRIBE counter_sum"):
+    for row in cur.stream("SUBSCRIBE amount_sum"):
         print(row)
 ```
@@ -213,8 +213,8 @@ with conn.cursor() as cur:
 To clean up the sources, views, and tables that we created, first connect to Materialize using a [PostgreSQL client](/integrations/sql-clients/) and then, run the following commands:
 
 ```mzsql
-DROP MATERIALIZED VIEW IF EXISTS counter_sum;
-DROP SOURCE IF EXISTS counter;
+DROP MATERIALIZED VIEW IF EXISTS amount_sum;
+DROP SOURCE IF EXISTS auction CASCADE;
 DROP TABLE IF EXISTS countries;
 ```
diff --git a/doc/user/content/integrations/client-libraries/ruby.md b/doc/user/content/integrations/client-libraries/ruby.md
index 37ec2cec22d5d..1011673adb7f9 100644
--- a/doc/user/content/integrations/client-libraries/ruby.md
+++ b/doc/user/content/integrations/client-libraries/ruby.md
@@ -102,7 +102,7 @@ conn = PG.connect(host:"MATERIALIZE_HOST", port: 6875, user: "MATERIALIZE_USERNA
 # Create a source
 src = conn.exec(
-    "CREATE SOURCE IF NOT EXISTS counter FROM LOAD GENERATOR counter;"
+    "CREATE SOURCE IF NOT EXISTS auction FROM LOAD GENERATOR AUCTION FOR ALL TABLES;"
 );
 puts src.inspect
@@ -125,9 +125,9 @@ conn = PG.connect(host:"MATERIALIZE_HOST", port: 6875, user: "MATERIALIZE_USERNA
 # Create a view
 view = conn.exec(
-    "CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
-    SELECT sum(counter)
-    FROM counter;"
+    "CREATE MATERIALIZED VIEW IF NOT EXISTS amount_sum AS
+    SELECT sum(amount)
+    FROM bids;"
 );
 puts view.inspect
@@ -153,7 +153,7 @@ require 'pg'
 # Locally running instance:
 conn = PG.connect(host:"MATERIALIZE_HOST", port: 6875, user: "MATERIALIZE_USERNAME", password: "MATERIALIZE_PASSWORD")
 
 conn.exec('BEGIN')
-conn.exec('DECLARE c CURSOR FOR SUBSCRIBE counter_sum')
+conn.exec('DECLARE c CURSOR FOR SUBSCRIBE amount_sum')
 
 while true
   conn.exec('FETCH c') do |result|
@@ -182,8 +182,8 @@ An `mz_diff` value of `-1` indicates Materialize is deleting one row with the in
 To clean up the sources, views, and tables that we created, first connect to Materialize using a [PostgreSQL client](/integrations/sql-clients/) and then, run the following commands:
 
 ```mzsql
-DROP MATERIALIZED VIEW IF EXISTS counter_sum;
-DROP SOURCE IF EXISTS counter;
+DROP MATERIALIZED VIEW IF EXISTS amount_sum;
+DROP SOURCE IF EXISTS auction CASCADE;
 DROP TABLE IF EXISTS countries;
 ```
diff --git a/doc/user/content/integrations/client-libraries/rust.md b/doc/user/content/integrations/client-libraries/rust.md
index 33484f376f7fc..45175816123ae 100644
--- a/doc/user/content/integrations/client-libraries/rust.md
+++ b/doc/user/content/integrations/client-libraries/rust.md
@@ -113,9 +113,8 @@ pub(crate) fn create_source() -> Result {
 
     client.execute(
         "
-        CREATE SOURCE IF NOT EXISTS counter
-        FROM LOAD GENERATOR COUNTER
-        (TICK INTERVAL '500ms');
+        CREATE SOURCE IF NOT EXISTS auction
+        FROM LOAD GENERATOR AUCTION FOR ALL TABLES;
         ",
         &[],
     )
@@ -134,9 +133,9 @@ pub(crate) fn create_materialized_view() -> Result {
 
     client.execute(
         "
-        CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
-        SELECT sum(counter)
-        FROM counter;
+        CREATE MATERIALIZED VIEW IF NOT EXISTS amount_sum AS
+        SELECT sum(amount)
+        FROM bids;
         ",
         &[],
    )
@@ -153,7 +152,7 @@ use crate::connection::create_client;
 pub(crate) fn subscribe() {
     let mut client = create_client().expect("Error creating client.");
     let mut transaction = client.transaction().expect("Error creating transaction.");
-    transaction.execute("DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum::text FROM counter_sum) WITH (SNAPSHOT = false);", &[]).expect("Error creating cursor.");
+    transaction.execute("DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum::text FROM amount_sum) WITH (SNAPSHOT = false);", &[]).expect("Error creating cursor.");
 
     loop {
         let results = transaction.query("FETCH ALL c;", &[]).expect("Error running fetch.");
@@ -164,15 +163,15 @@ pub(crate) fn subscribe() {
     }
 }
 ```
 
-The [SUBSCRIBE output format](/sql/subscribe/#output) of the `counter_sum` view contains all of the columns of the view, prepended with several additional columns that describe the nature of the update.
+The [SUBSCRIBE output format](/sql/subscribe/#output) of the `amount_sum` view contains all of the columns of the view, prepended with several additional columns that describe the nature of the update.
 ## Clean up
 
 To clean up the sources, views, and tables that we created, first connect to Materialize using a [PostgreSQL client](/integrations/sql-clients/) and then, run the following commands:
 
 ```mzsql
-DROP MATERIALIZED VIEW IF EXISTS counter_sum;
-DROP SOURCE IF EXISTS counter;
+DROP MATERIALIZED VIEW IF EXISTS amount_sum;
+DROP SOURCE IF EXISTS auction CASCADE;
 DROP TABLE IF EXISTS countries;
 ```
diff --git a/doc/user/content/releases/v0.94.md b/doc/user/content/releases/v0.94.md
index 94a75cb0ea7a4..857360e0684b3 100644
--- a/doc/user/content/releases/v0.94.md
+++ b/doc/user/content/releases/v0.94.md
@@ -12,6 +12,6 @@ patch: 2
 * Set subsources into an errored state in the [PostgreSQL source](/sql/create-source/postgres/)
   if the corresponding table is dropped from the publication upstream.
 
-* Add a `KEY VALUE` [load generator source](/sql/create-source/load-generator/#key-value),
+* Add a `KEY VALUE` load generator source,
   which produces keyed data that can be passed through to [`ENVELOPE UPSERT`](/sql/create-source/#upsert-envelope).
   This is useful for internal testing.
diff --git a/doc/user/content/sql/create-source/load-generator.md b/doc/user/content/sql/create-source/load-generator.md
index 84337671eb62f..49658635128b3 100644
--- a/doc/user/content/sql/create-source/load-generator.md
+++ b/doc/user/content/sql/create-source/load-generator.md
@@ -31,7 +31,6 @@ Field | Use
 ------|-----
 _src_name_ | The name for the source.
 **IN CLUSTER** _cluster_name_ | The [cluster](/sql/create-cluster) to maintain this source.
-**COUNTER** | Use the [counter](#counter) load generator.
 **AUCTION** | Use the [auction](#auction) load generator.
 **MARKETING**| Use the [marketing](#marketing) load generator.
 **TPCH** | Use the [tpch](#tpch) load generator.
@@ -40,14 +39,6 @@ _src_name_ | The name for the source.
 **AS OF** | The tick at which to start producing data. Defaults to 0. {{< warn-if-unreleased-inline "v0.101" >}}
 **UP TO** | The tick before which to stop producing data. Defaults to infinite. {{< warn-if-unreleased-inline "v0.101" >}}
 **SCALE FACTOR** | The scale factor for the `TPCH` generator. Defaults to `0.01` (~ 10MB).
-**MAX CARDINALITY** | Valid for the `COUNTER` generator. Causes the generator to delete old values to keep the collection at most a given size. Defaults to unlimited.
-**KEYS** | Valid for [`KEY VALUE` generator](#key-value).
-**SNAPSHOT ROUNDS** | Valid for [`KEY VALUE` generator](#key-value).
-**TRANSACTIONAL SNAPSHOT** | Valid for [`KEY VALUE` generator](#key-value).
-**VALUE SIZE** | Valid for [`KEY VALUE` generator](#key-value).
-**SEED** | Valid for [`KEY VALUE` generator](#key-value).
-**PARTITIONS** | Valid for [`KEY VALUE` generator](#key-value).
-**BATCH SIZE** | Valid for [`KEY VALUE` generator](#key-value).
 **FOR ALL TABLES** | Creates subsources for all tables in the load generator.
 **EXPOSE PROGRESS AS** _progress_subsource_name_ | The name of the progress subsource for the source. If this is not specified, the subsource will be named `<src_name>_progress`. For more information, see [Monitoring source progress](#monitoring-source-progress).
 **RETAIN HISTORY FOR** _retention_period_ | ***Private preview.** This option has known performance or stability issues and is under active development.* Duration for which Materialize retains historical data, which is useful to implement [durable subscriptions](/transform-data/patterns/durable-subscriptions/#history-retention-period). Accepts positive [interval](/sql/types/interval/) values (e.g. `'1hr'`). Default: `1s`.
@@ -59,11 +50,6 @@ get up and running with no external dependencies before plugging in your own
 data sources. If you would like to see an additional load generator, please
 submit a [feature request].
 
-### Counter
-
-The counter load generator produces the sequence `1`, `2`, `3`, …. Each tick
-interval, the next number in the sequence is emitted.
-
 ### Auction
 
 The auction load generator simulates an auction house, where users are bidding
@@ -183,37 +169,6 @@ The TPCH source must be used with `FOR ALL TABLES`, which will create the standa
 If `TICK INTERVAL` is specified, after the initial data load, an order and its
 lineitems will be changed at this interval. If not specified, the dataset will
 not change over time.
 
-### KEY VALUE
-
-{{< private-preview />}}
-
-The `KEY VALUE` load generator produces keyed data that is intended to be passed though the [`UPSERT` envelope](/sql/create-source/#upsert-envelope).
-Its size and performance can be configured in detailed ways.
-
-The schema of the data is:
-
-Field      | Type      | Description
------------|-----------|------------
-key        | [`uint8`] | The key for the value
-partition  | [`uint8`] | The partition this key belongs to
-value      | [`bytea`] | Random data associated with the key.
-offset     | [`uint8`] | The offset of the data (if `INCLUDE OFFSET` is configured).
-
-The following options are supported:
-
-- `KEYS`: The number of keys in the source. For now, this must be divisible by `PARTITIONS` * `BATCH SIZE`,
-  though this constraint may be lifted in the future.
-- `SNAPSHOT ROUNDS`: The number of rounds of data (1 update per key in each round) to produce
-  as the source starts up. Can be used to scale the size of the snapshot without changing the number
-  of keys.
-- `TRANSACTIONAL SNAPSHOT`: Whether or not to emit the snapshot as a singular transaction.
-- `VALUE SIZE`: The number of bytes in each `value`.
-- `TICK INTERVAL`: The _minimum interval_ (as an [`interval`]) to produce batches of data (within each partition) after snapshotting.
-- `SEED`: A per-source [`uint8`] seed for seeding the random data.
-- `PARTITIONS`: The number of partitions to spread the keys across. Can be used to scale concurrency independent of
-  the replica size.
-- `BATCH SIZE`: The number of keys per partition to produce in each update (based on `TICK INTERVAL`).
-
 ### Monitoring source progress
 
 By default, load generator sources expose progress metadata as a subsource that
@@ -240,30 +195,6 @@ issues, see [Troubleshooting](/ops/troubleshooting/).
 ## Examples
 
-### Creating a counter load generator
-
-To create a load generator source that emits the next number in the sequence every
-500 milliseconds:
-
-```mzsql
-CREATE SOURCE counter
-  FROM LOAD GENERATOR COUNTER
-  (TICK INTERVAL '500ms');
-```
-
-To examine the counter:
-
-```mzsql
-SELECT * FROM counter;
-```
-```nofmt
- counter
----------
-       1
-       2
-       3
-```
-
 ### Creating an auction load generator
 
 To create a load generator source that simulates an auction house and emits new data every second:
diff --git a/doc/user/content/sql/subscribe.md b/doc/user/content/sql/subscribe.md
index 52ea1764d0301..7e522e9ba50e5 100644
--- a/doc/user/content/sql/subscribe.md
+++ b/doc/user/content/sql/subscribe.md
@@ -241,12 +241,12 @@ timestamp `4` implies that there are no more updates for either timestamp
 Many drivers buffer all results until a query is complete, and so will never return.
 Below are the recommended ways to work around this.
 
-### Creating a counter load generator
+### Creating an auction load generator
 
-As an example, we'll create a [counter load generator](/sql/create-source/load-generator/#creating-a-counter-load-generator) that emits a row every second:
+As an example, we'll create an [auction load generator](/sql/create-source/load-generator/#creating-an-auction-load-generator) that emits a row every second:
 
 ```mzsql
-CREATE SOURCE counter FROM LOAD GENERATOR COUNTER;
+CREATE SOURCE auction FROM LOAD GENERATOR AUCTION FOR ALL TABLES;
 ```
 
 ### Subscribing with `FETCH`
@@ -254,13 +254,13 @@ CREATE SOURCE counter FROM LOAD GENERATOR COUNTER;
 The recommended way to use `SUBSCRIBE` is with [`DECLARE`](/sql/declare) and [`FETCH`](/sql/fetch).
 These must be used within a transaction, with [a single `DECLARE`](/sql/begin/#read-only-transactions) per transaction.
 This allows you to limit the number of rows and the time window of your requests.
 
-Next, let's subscribe to the `counter` load generator source that we've created above.
+Next, let's subscribe to the `bids` table of the `auction` load generator source that we've created above.
 
 First, declare a `SUBSCRIBE` cursor:
 
 ```mzsql
 BEGIN;
-DECLARE c CURSOR FOR SUBSCRIBE (SELECT * FROM counter);
+DECLARE c CURSOR FOR SUBSCRIBE (SELECT * FROM bids);
 ```
 
 Then, use [`FETCH`](/sql/fetch) in a loop to retrieve each batch of results as soon as it's ready:
@@ -294,7 +294,7 @@ FETCH ALL c WITH (timeout='0s');
 If you want to use `SUBSCRIBE` from an interactive SQL session (e.g.`psql`), wrap the query in `COPY`:
 
 ```mzsql
-COPY (SUBSCRIBE (SELECT * FROM counter)) TO STDOUT;
+COPY (SUBSCRIBE (SELECT * FROM bids)) TO STDOUT;
 ```
 
 | Additional guides |
@@ -537,12 +537,12 @@ to sort the rows within each distinct timestamp.
 * If [`PROGRESS`](#progress) is set, progress messages are unaffected.
-### Dropping the `counter` load generator source
+### Dropping the `auction` load generator source
 
-When you're done, you can drop the `counter` load generator source:
+When you're done, you can drop the `auction` load generator source:
 
 ```mzsql
-DROP SOURCE counter;
+DROP SOURCE auction CASCADE;
 ```
 
 ### Durable subscriptions
diff --git a/doc/user/layouts/partials/sql-grammar/create-source-load-generator.svg b/doc/user/layouts/partials/sql-grammar/create-source-load-generator.svg
index 18903f0bb49b5..ae6d85337427e 100644
[Regenerated SVG railroad diagram for the CREATE SOURCE ... FROM LOAD GENERATOR grammar: the COUNTER and KEY VALUE branches are removed, leaving AUCTION, MARKETING, and TPCH alongside the FOR ALL TABLES and EXPOSE PROGRESS AS progress_subsource_name clauses. The regenerated markup is omitted here.]
diff --git a/doc/user/layouts/partials/sql-grammar/load-generator-option.svg b/doc/user/layouts/partials/sql-grammar/load-generator-option.svg
index 73d7db6457669..54e2f538b5f23 100644
[Regenerated SVG railroad diagram for load_generator_option: the MAX CARDINALITY, KEYS, SNAPSHOT ROUNDS, TRANSACTIONAL SNAPSHOT, VALUE SIZE, SEED, PARTITIONS, and BATCH SIZE branches are removed. The regenerated markup is omitted here.]
diff --git a/doc/user/sql-grammar/sql-grammar.bnf b/doc/user/sql-grammar/sql-grammar.bnf
index 8ae3f324896d5..6eac3eea7e476 100644
--- a/doc/user/sql-grammar/sql-grammar.bnf
+++ b/doc/user/sql-grammar/sql-grammar.bnf
@@ -196,7 +196,7 @@ create_source_kafka ::=
 create_source_load_generator ::=
   'CREATE SOURCE' ('IF NOT EXISTS')? src_name
   ('IN CLUSTER' cluster_name)?
-  'FROM LOAD GENERATOR' ('AUCTION' | 'COUNTER' | 'MARKETING' | 'TPCH' | 'KEY VALUE')
+  'FROM LOAD GENERATOR' ('AUCTION' | 'MARKETING' | 'TPCH' | 'KEY VALUE')
   ('(' (load_generator_option) ( ( ',' load_generator_option ) )* ')')?
   'FOR ALL TABLES'
   ('EXPOSE' 'PROGRESS' 'AS' progress_subsource_name)?
diff --git a/misc/python/materialize/mzcompose/__init__.py b/misc/python/materialize/mzcompose/__init__.py
index cb4bb396907d2..29a3dcfe86930 100644
--- a/misc/python/materialize/mzcompose/__init__.py
+++ b/misc/python/materialize/mzcompose/__init__.py
@@ -100,6 +100,7 @@ def get_minimal_system_parameters(
         "enable_introspection_subscribes": "true",
         "enable_kafka_sink_partition_by": "true",
         "enable_lgalloc": "false",
+        "enable_load_generator_counter": "true",
         "enable_logical_compaction_window": "true",
         "enable_multi_worker_storage_persist_sink": "true",
         "enable_multi_replica_sources": "true",
diff --git a/src/environmentd/tests/pgwire.rs b/src/environmentd/tests/pgwire.rs
index 2223b8eb7c875..530d18fb4929f 100644
--- a/src/environmentd/tests/pgwire.rs
+++ b/src/environmentd/tests/pgwire.rs
@@ -622,6 +622,7 @@ fn pg_test_inner(path: &Path, mz_flags: bool) {
     server.enable_feature_flags(&[
         "enable_copy_to_expr",
         "enable_create_table_from_source",
+        "enable_load_generator_datums",
         "enable_raise_statement",
         "unsafe_enable_unorchestrated_cluster_replicas",
         "unsafe_enable_unsafe_functions",
diff --git a/src/environmentd/tests/server.rs b/src/environmentd/tests/server.rs
index 53df33fbdd9ed..fc20f348eb628 100644
--- a/src/environmentd/tests/server.rs
+++ b/src/environmentd/tests/server.rs
@@ -218,6 +218,9 @@ fn test_statement_logging_immediate() {
     mz_client
         .batch_execute("ALTER SYSTEM SET statement_logging_default_sample_rate = 1")
         .unwrap();
+    mz_client
+        .batch_execute("ALTER SYSTEM SET enable_load_generator_counter = true")
+        .unwrap();
 
     let successful_immediates: &[&str] = &[
         "CREATE VIEW v AS SELECT 1;",
diff --git a/src/environmentd/tests/sql.rs b/src/environmentd/tests/sql.rs
index 74fcea570fae1..b59b0b7b5b388 100644
--- a/src/environmentd/tests/sql.rs
+++ b/src/environmentd/tests/sql.rs
@@ -2088,7 +2088,10 @@ fn test_load_generator() {
     let server = test_util::TestHarness::default()
         .unsafe_mode()
         .start_blocking();
-    server.enable_feature_flags(&["enable_create_table_from_source"]);
+    server.enable_feature_flags(&[
+        "enable_create_table_from_source",
+        "enable_load_generator_counter",
+    ]);
 
     let mut client = server.connect(postgres::NoTls).unwrap();
 
     client
diff --git a/src/sql/src/plan/statement/ddl.rs b/src/sql/src/plan/statement/ddl.rs
index 59b7db484ba3a..43293ea7799a4 100644
--- a/src/sql/src/plan/statement/ddl.rs
+++ b/src/sql/src/plan/statement/ddl.rs
@@ -2066,17 +2066,21 @@ pub(crate) fn load_generator_ast_to_generator(
     let load_generator = match loadgen {
         ast::LoadGenerator::Auction => LoadGenerator::Auction,
         ast::LoadGenerator::Clock => {
-            scx.require_feature_flag(&crate::session::vars::ENABLE_CLOCK_LOAD_GENERATOR)?;
+            scx.require_feature_flag(&vars::ENABLE_LOAD_GENERATOR_CLOCK)?;
             LoadGenerator::Clock
         }
         ast::LoadGenerator::Counter => {
+            scx.require_feature_flag(&vars::ENABLE_LOAD_GENERATOR_COUNTER)?;
             let LoadGeneratorOptionExtracted { max_cardinality, .. } = extracted;
             LoadGenerator::Counter { max_cardinality }
         }
         ast::LoadGenerator::Marketing => LoadGenerator::Marketing,
-        ast::LoadGenerator::Datums => LoadGenerator::Datums,
+        ast::LoadGenerator::Datums => {
+            scx.require_feature_flag(&vars::ENABLE_LOAD_GENERATOR_DATUMS)?;
+            LoadGenerator::Datums
+        }
         ast::LoadGenerator::Tpch => {
             let LoadGeneratorOptionExtracted { scale_factor, .. } = extracted;
diff --git a/src/sql/src/session/vars/definitions.rs b/src/sql/src/session/vars/definitions.rs
index 5461d0ae5a704..71c7b84e26b58 100644
--- a/src/sql/src/session/vars/definitions.rs
+++ b/src/sql/src/session/vars/definitions.rs
@@ -1978,11 +1978,29 @@ feature_flags!(
         default: false,
         enable_for_item_parsing: true,
     },
+    {
+        name: enable_load_generator_counter,
+        desc: "Create a LOAD GENERATOR COUNTER",
+        default: false,
+        enable_for_item_parsing: true,
+    },
+    {
+        name: enable_load_generator_clock,
+        desc: "Create a LOAD GENERATOR CLOCK",
+        default: false,
+        enable_for_item_parsing: true,
+    },
+    {
+        name: enable_load_generator_datums,
+        desc: "Create a LOAD GENERATOR DATUMS",
+        default: false,
+        enable_for_item_parsing: true,
+    },
     {
         name: enable_load_generator_key_value,
         desc: "Create a LOAD GENERATOR KEY VALUE",
         default: false,
-        enable_for_item_parsing: false,
+        enable_for_item_parsing: true,
     },
     {
         name: enable_expressions_in_limit_syntax,
@@ -2099,12 +2117,6 @@ feature_flags!(
         default: true,
         enable_for_item_parsing: true,
     },
-    {
-        name: enable_clock_load_generator,
-        desc: "Enable the clock load generator",
-        default: false,
-        enable_for_item_parsing: true,
-    },
     {
         name: enable_continual_task_create,
         desc: "CREATE CONTINUAL TASK",
diff --git a/test/testdrive/load-generator.td b/test/testdrive/load-generator.td
index a71fa15bf6386..c5b9d248f7c2e 100644
--- a/test/testdrive/load-generator.td
+++ b/test/testdrive/load-generator.td
@@ -11,7 +11,8 @@ $ set-arg-default default-replica-size=scale=1,workers=1
 $ set-arg-default single-replica-cluster=quickstart
 
 $ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
-ALTER SYSTEM SET enable_clock_load_generator = true;
+ALTER SYSTEM SET enable_load_generator_clock = true;
+ALTER SYSTEM SET enable_load_generator_datums = true;
 
 > CREATE SOURCE counter_empty IN CLUSTER ${arg.single-replica-cluster}
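
The net effect of this change: the `COUNTER` generator is not removed, only gated behind the new `enable_load_generator_counter` feature flag (and undocumented). A minimal sketch of how it could still be used, assuming a connection with system privileges such as the `mz_system` user on the internal SQL address (the flag name and the `TICK INTERVAL` syntax are taken directly from this diff; the flag defaults to `false`):

```mzsql
-- Enable the gated generator; requires superuser/system access,
-- as in the testdrive file above.
ALTER SYSTEM SET enable_load_generator_counter = true;

-- Afterwards, the syntax from the removed docs works as before.
CREATE SOURCE counter
  FROM LOAD GENERATOR COUNTER
  (TICK INTERVAL '500ms');
```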