Files
hatchet/pkg/repository/v1/sqlcv1/payload-store.sql.go
matt 0a947924fa Feat: Parallelize replication from PG -> External (#2637)
* feat: chunking query

* feat: first pass at range chunking

* fix: bug bashing

* fix: function geq

* fix: use maps.Copy

* fix: olap func

* feat: olap side

* refactor: external id

* fix: order by

* feat: wire up env vars

* fix: pass var through

* fix: naming

* fix: append to returnErr properly

* fix: use eg.Go
2025-12-10 17:11:03 -05:00

718 lines
22 KiB
Go

// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.29.0
// source: payload-store.sql
package sqlcv1
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
// acquireOrExtendCutoverJobLease implements a cooperative lease over the cutover
// job for a given partition-date key, in a single statement:
//   - no new offset row is inserted while a different process holds an unexpired
//     lease on ANY key (the any_lease_held_by_other_process CTE);
//   - on conflict, the stored offset tuple (last_tenant_id, last_inserted_at,
//     last_id, last_type) only advances when the caller already owns the lease;
//     a newly-acquiring process keeps the previous offset;
//   - lease_process_id / lease_expires_at are rewritten, but only when the
//     existing lease is expired or owned by the caller (final WHERE clause).
const acquireOrExtendCutoverJobLease = `-- name: AcquireOrExtendCutoverJobLease :one
WITH inputs AS (
SELECT
$2::DATE AS key,
$1::UUID AS lease_process_id,
$3::TIMESTAMPTZ AS lease_expires_at,
$4::UUID AS last_tenant_id,
$5::TIMESTAMPTZ AS last_inserted_at,
$6::BIGINT AS last_id,
$7::v1_payload_type AS last_type
), any_lease_held_by_other_process AS (
-- need coalesce here in case there are no rows that don't belong to this process
SELECT COALESCE(BOOL_OR(lease_expires_at > NOW()), FALSE) AS lease_exists
FROM v1_payload_cutover_job_offset
WHERE lease_process_id != $1::UUID
), to_insert AS (
SELECT key, lease_process_id, lease_expires_at, last_tenant_id, last_inserted_at, last_id, last_type
FROM inputs
-- if a lease is held by another process, we shouldn't try to insert a new row regardless
-- of which key we're trying to acquire a lease on
WHERE NOT (SELECT lease_exists FROM any_lease_held_by_other_process)
)
INSERT INTO v1_payload_cutover_job_offset (key, lease_process_id, lease_expires_at, last_tenant_id, last_inserted_at, last_id, last_type)
SELECT ti.key, ti.lease_process_id, ti.lease_expires_at, ti.last_tenant_id, ti.last_inserted_at, ti.last_id, ti.last_type
FROM to_insert ti
ON CONFLICT (key)
DO UPDATE SET
-- if the lease is held by this process, then we extend the offset to the new tuple of (last_tenant_id, last_inserted_at, last_id, last_type)
-- otherwise it's a new process acquiring the lease, so we should keep the offset where it was before
last_tenant_id = CASE
WHEN EXCLUDED.lease_process_id = v1_payload_cutover_job_offset.lease_process_id THEN EXCLUDED.last_tenant_id
ELSE v1_payload_cutover_job_offset.last_tenant_id
END,
last_inserted_at = CASE
WHEN EXCLUDED.lease_process_id = v1_payload_cutover_job_offset.lease_process_id THEN EXCLUDED.last_inserted_at
ELSE v1_payload_cutover_job_offset.last_inserted_at
END,
last_id = CASE
WHEN EXCLUDED.lease_process_id = v1_payload_cutover_job_offset.lease_process_id THEN EXCLUDED.last_id
ELSE v1_payload_cutover_job_offset.last_id
END,
last_type = CASE
WHEN EXCLUDED.lease_process_id = v1_payload_cutover_job_offset.lease_process_id THEN EXCLUDED.last_type
ELSE v1_payload_cutover_job_offset.last_type
END,
lease_process_id = EXCLUDED.lease_process_id,
lease_expires_at = EXCLUDED.lease_expires_at
WHERE v1_payload_cutover_job_offset.lease_expires_at < NOW() OR v1_payload_cutover_job_offset.lease_process_id = $1::UUID
RETURNING key, is_completed, lease_process_id, lease_expires_at, last_tenant_id, last_inserted_at, last_id, last_type
`

// AcquireOrExtendCutoverJobLeaseParams carries the positional arguments for
// AcquireOrExtendCutoverJobLease; each field maps to one $n placeholder above.
type AcquireOrExtendCutoverJobLeaseParams struct {
	Leaseprocessid pgtype.UUID        `json:"leaseprocessid"`
	Key            pgtype.Date        `json:"key"`
	Leaseexpiresat pgtype.Timestamptz `json:"leaseexpiresat"`
	Lasttenantid   pgtype.UUID        `json:"lasttenantid"`
	Lastinsertedat pgtype.Timestamptz `json:"lastinsertedat"`
	Lastid         int64              `json:"lastid"`
	Lasttype       V1PayloadType      `json:"lasttype"`
}

// AcquireOrExtendCutoverJobLease runs the lease upsert above and returns the
// resulting offset row. When another live process owns the lease, the statement
// neither inserts nor updates, so the scan fails with the driver's no-rows error.
func (q *Queries) AcquireOrExtendCutoverJobLease(ctx context.Context, db DBTX, arg AcquireOrExtendCutoverJobLeaseParams) (*V1PayloadCutoverJobOffset, error) {
	offset := &V1PayloadCutoverJobOffset{}
	err := db.QueryRow(ctx, acquireOrExtendCutoverJobLease,
		arg.Leaseprocessid,
		arg.Key,
		arg.Leaseexpiresat,
		arg.Lasttenantid,
		arg.Lastinsertedat,
		arg.Lastid,
		arg.Lasttype,
	).Scan(
		&offset.Key,
		&offset.IsCompleted,
		&offset.LeaseProcessID,
		&offset.LeaseExpiresAt,
		&offset.LastTenantID,
		&offset.LastInsertedAt,
		&offset.LastID,
		&offset.LastType,
	)
	return offset, err
}
const analyzeV1Payload = `-- name: AnalyzeV1Payload :exec
ANALYZE v1_payload
`

// AnalyzeV1Payload refreshes planner statistics for the v1_payload table.
func (q *Queries) AnalyzeV1Payload(ctx context.Context, db DBTX) error {
	if _, err := db.Exec(ctx, analyzeV1Payload); err != nil {
		return err
	}
	return nil
}
// createPayloadRangeChunks pages through a partition via
// list_paginated_payloads_for_offload, numbers the rows in
// (tenant_id, inserted_at, id, type) order, and keeps every chunksize-th row
// (MOD(rn, chunksize) = 1). The surviving rows are chunk boundaries that callers
// can use to fan replication work out over key ranges.
const createPayloadRangeChunks = `-- name: CreatePayloadRangeChunks :many
WITH payloads AS (
SELECT
(p).*
FROM list_paginated_payloads_for_offload(
$2::DATE,
$3::INTEGER,
$4::UUID,
$5::TIMESTAMPTZ,
$6::BIGINT,
$7::v1_payload_type
) p
), with_rows AS (
SELECT
tenant_id::UUID,
id::BIGINT,
inserted_at::TIMESTAMPTZ,
type::v1_payload_type,
ROW_NUMBER() OVER (ORDER BY tenant_id, inserted_at, id, type) AS rn
FROM payloads
)
SELECT tenant_id, id, inserted_at, type, rn
FROM with_rows
WHERE MOD(rn, $1::INTEGER) = 1
ORDER BY tenant_id, inserted_at, id, type
`

// CreatePayloadRangeChunksParams carries the positional arguments for
// CreatePayloadRangeChunks. The last* fields form the keyset cursor passed to
// list_paginated_payloads_for_offload.
type CreatePayloadRangeChunksParams struct {
	Chunksize      int32              `json:"chunksize"`
	Partitiondate  pgtype.Date        `json:"partitiondate"`
	Windowsize     int32              `json:"windowsize"`
	Lasttenantid   pgtype.UUID        `json:"lasttenantid"`
	Lastinsertedat pgtype.Timestamptz `json:"lastinsertedat"`
	Lastid         int64              `json:"lastid"`
	Lasttype       V1PayloadType      `json:"lasttype"`
}

// CreatePayloadRangeChunksRow is one chunk-boundary row; Rn is the ROW_NUMBER
// within the scanned window.
type CreatePayloadRangeChunksRow struct {
	TenantID   pgtype.UUID        `json:"tenant_id"`
	ID         int64              `json:"id"`
	InsertedAt pgtype.Timestamptz `json:"inserted_at"`
	Type       V1PayloadType      `json:"type"`
	Rn         int64              `json:"rn"`
}

// CreatePayloadRangeChunks returns the chunk-boundary rows for a partition
// window. Row numbers are one-indexed, so the first row of the window is always
// a boundary.
func (q *Queries) CreatePayloadRangeChunks(ctx context.Context, db DBTX, arg CreatePayloadRangeChunksParams) ([]*CreatePayloadRangeChunksRow, error) {
	rows, err := db.Query(ctx, createPayloadRangeChunks,
		arg.Chunksize,
		arg.Partitiondate,
		arg.Windowsize,
		arg.Lasttenantid,
		arg.Lastinsertedat,
		arg.Lastid,
		arg.Lasttype,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var boundaries []*CreatePayloadRangeChunksRow
	for rows.Next() {
		boundary := new(CreatePayloadRangeChunksRow)
		if err := rows.Scan(
			&boundary.TenantID,
			&boundary.ID,
			&boundary.InsertedAt,
			&boundary.Type,
			&boundary.Rn,
		); err != nil {
			return nil, err
		}
		boundaries = append(boundaries, boundary)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return boundaries, nil
}
const createV1PayloadCutoverTemporaryTable = `-- name: CreateV1PayloadCutoverTemporaryTable :exec
SELECT copy_v1_payload_partition_structure($1::DATE)
`

// CreateV1PayloadCutoverTemporaryTable invokes the
// copy_v1_payload_partition_structure database function for the given
// partition date.
func (q *Queries) CreateV1PayloadCutoverTemporaryTable(ctx context.Context, db DBTX, date pgtype.Date) error {
	if _, err := db.Exec(ctx, createV1PayloadCutoverTemporaryTable, date); err != nil {
		return err
	}
	return nil
}
// cutOverPayloadsToExternal drains due items from the cutover queue for the
// tenants mapped to one partition number: it locks up to polllimit queue items
// whose cut_over_at has passed (SKIP LOCKED for concurrent pollers), flips the
// matching v1_payload rows to EXTERNAL and clears their inline content (only
// rows that already carry an external_location_key), deletes the processed
// queue items, and returns the number of queue items picked up.
const cutOverPayloadsToExternal = `-- name: CutOverPayloadsToExternal :one
WITH tenants AS (
SELECT UNNEST(
find_matching_tenants_in_payload_cutover_queue_item_partition(
$1::INT
)
) AS tenant_id
), queue_items AS (
SELECT tenant_id, cut_over_at, payload_id, payload_inserted_at, payload_type
FROM v1_payload_cutover_queue_item
WHERE
tenant_id = ANY(SELECT tenant_id FROM tenants)
AND cut_over_at <= NOW()
ORDER BY cut_over_at
LIMIT $2::INT
FOR UPDATE SKIP LOCKED
), payload_updates AS (
UPDATE v1_payload
SET
location = 'EXTERNAL',
inline_content = NULL,
updated_at = NOW()
FROM queue_items qi
WHERE
v1_payload.id = qi.payload_id
AND v1_payload.inserted_at = qi.payload_inserted_at
AND v1_payload.tenant_id = qi.tenant_id
AND v1_payload.type = qi.payload_type
AND v1_payload.external_location_key IS NOT NULL
), deletions AS (
DELETE FROM v1_payload_cutover_queue_item
WHERE
(cut_over_at, payload_id, payload_inserted_at, payload_type, tenant_id) IN (
SELECT cut_over_at, payload_id, payload_inserted_at, payload_type, tenant_id
FROM queue_items
)
)
SELECT COUNT(*)
FROM queue_items
`

// CutOverPayloadsToExternalParams carries the positional arguments for
// CutOverPayloadsToExternal.
type CutOverPayloadsToExternalParams struct {
	Partitionnumber int32 `json:"partitionnumber"`
	Polllimit       int32 `json:"polllimit"`
}

// CutOverPayloadsToExternal runs one cutover batch and returns the number of
// queue items claimed (which may exceed the number of payload rows updated,
// since the UPDATE also requires external_location_key IS NOT NULL).
func (q *Queries) CutOverPayloadsToExternal(ctx context.Context, db DBTX, arg CutOverPayloadsToExternalParams) (int64, error) {
	var claimed int64
	err := db.QueryRow(ctx, cutOverPayloadsToExternal, arg.Partitionnumber, arg.Polllimit).Scan(&claimed)
	return claimed, err
}
// listPaginatedPayloadsForOffload selects one keyset-paginated page of payload
// rows from a partition via the list_paginated_payloads_for_offload database
// function, normalizing a NULL external_location_key to the empty string.
const listPaginatedPayloadsForOffload = `-- name: ListPaginatedPayloadsForOffload :many
WITH payloads AS (
SELECT
(p).*
FROM list_paginated_payloads_for_offload(
$1::DATE,
$2::INT,
$3::UUID,
$4::TIMESTAMPTZ,
$5::BIGINT,
$6::v1_payload_type
) p
)
SELECT
tenant_id::UUID,
id::BIGINT,
inserted_at::TIMESTAMPTZ,
external_id::UUID,
type::v1_payload_type,
location::v1_payload_location,
COALESCE(external_location_key, '')::TEXT AS external_location_key,
inline_content::JSONB AS inline_content,
updated_at::TIMESTAMPTZ
FROM payloads
`

// ListPaginatedPayloadsForOffloadParams carries the positional arguments for
// ListPaginatedPayloadsForOffload. The last* fields form the keyset cursor.
type ListPaginatedPayloadsForOffloadParams struct {
	Partitiondate  pgtype.Date        `json:"partitiondate"`
	Limitparam     int32              `json:"limitparam"`
	Lasttenantid   pgtype.UUID        `json:"lasttenantid"`
	Lastinsertedat pgtype.Timestamptz `json:"lastinsertedat"`
	Lastid         int64              `json:"lastid"`
	Lasttype       V1PayloadType      `json:"lasttype"`
}

// ListPaginatedPayloadsForOffloadRow is one payload row returned for offload;
// ExternalLocationKey is "" when the column is NULL.
type ListPaginatedPayloadsForOffloadRow struct {
	TenantID            pgtype.UUID        `json:"tenant_id"`
	ID                  int64              `json:"id"`
	InsertedAt          pgtype.Timestamptz `json:"inserted_at"`
	ExternalID          pgtype.UUID        `json:"external_id"`
	Type                V1PayloadType      `json:"type"`
	Location            V1PayloadLocation  `json:"location"`
	ExternalLocationKey string             `json:"external_location_key"`
	InlineContent       []byte             `json:"inline_content"`
	UpdatedAt           pgtype.Timestamptz `json:"updated_at"`
}

// ListPaginatedPayloadsForOffload fetches one page of payloads for offload.
func (q *Queries) ListPaginatedPayloadsForOffload(ctx context.Context, db DBTX, arg ListPaginatedPayloadsForOffloadParams) ([]*ListPaginatedPayloadsForOffloadRow, error) {
	rows, err := db.Query(ctx, listPaginatedPayloadsForOffload,
		arg.Partitiondate,
		arg.Limitparam,
		arg.Lasttenantid,
		arg.Lastinsertedat,
		arg.Lastid,
		arg.Lasttype,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var page []*ListPaginatedPayloadsForOffloadRow
	for rows.Next() {
		rec := new(ListPaginatedPayloadsForOffloadRow)
		if err := rows.Scan(
			&rec.TenantID,
			&rec.ID,
			&rec.InsertedAt,
			&rec.ExternalID,
			&rec.Type,
			&rec.Location,
			&rec.ExternalLocationKey,
			&rec.InlineContent,
			&rec.UpdatedAt,
		); err != nil {
			return nil, err
		}
		page = append(page, rec)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return page, nil
}
const markCutoverJobAsCompleted = `-- name: MarkCutoverJobAsCompleted :exec
UPDATE v1_payload_cutover_job_offset
SET is_completed = TRUE
WHERE key = $1::DATE
`

// MarkCutoverJobAsCompleted flags the cutover job offset row for the given
// partition-date key as completed.
func (q *Queries) MarkCutoverJobAsCompleted(ctx context.Context, db DBTX, key pgtype.Date) error {
	if _, err := db.Exec(ctx, markCutoverJobAsCompleted, key); err != nil {
		return err
	}
	return nil
}
// pollPayloadWALForRecordsToReplicate claims up to polllimit WAL records for
// the tenants mapped to one partition number (SKIP LOCKED for concurrent
// pollers). WAL records whose payload row no longer exists are deleted inline
// as garbage; the remaining records are returned joined to their payload's
// current location and inline content.
const pollPayloadWALForRecordsToReplicate = `-- name: PollPayloadWALForRecordsToReplicate :many
WITH tenants AS (
SELECT UNNEST(
find_matching_tenants_in_payload_wal_partition(
$1::INT
)
) AS tenant_id
), wal_records AS (
SELECT tenant_id, offload_at, payload_id, payload_inserted_at, payload_type, operation
FROM v1_payload_wal
WHERE tenant_id = ANY(SELECT tenant_id FROM tenants)
ORDER BY offload_at
LIMIT $2::INT
FOR UPDATE SKIP LOCKED
), wal_records_without_payload AS (
SELECT tenant_id, offload_at, payload_id, payload_inserted_at, payload_type, operation
FROM wal_records wr
WHERE NOT EXISTS (
SELECT 1
FROM v1_payload p
WHERE (p.tenant_id, p.inserted_at, p.id, p.type) = (wr.tenant_id, wr.payload_inserted_at, wr.payload_id, wr.payload_type)
)
), deleted_wal_records AS (
DELETE FROM v1_payload_wal
WHERE (offload_at, payload_id, payload_inserted_at, payload_type, tenant_id) IN (
SELECT offload_at, payload_id, payload_inserted_at, payload_type, tenant_id
FROM wal_records_without_payload
)
)
SELECT wr.tenant_id, wr.offload_at, wr.payload_id, wr.payload_inserted_at, wr.payload_type, wr.operation, p.location, p.inline_content
FROM wal_records wr
JOIN v1_payload p ON (p.tenant_id, p.inserted_at, p.id, p.type) = (wr.tenant_id, wr.payload_inserted_at, wr.payload_id, wr.payload_type)
`

// PollPayloadWALForRecordsToReplicateParams carries the positional arguments
// for PollPayloadWALForRecordsToReplicate.
type PollPayloadWALForRecordsToReplicateParams struct {
	Partitionnumber int32 `json:"partitionnumber"`
	Polllimit       int32 `json:"polllimit"`
}

// PollPayloadWALForRecordsToReplicateRow is one claimed WAL record together
// with the current location and inline content of its payload.
type PollPayloadWALForRecordsToReplicateRow struct {
	TenantID          pgtype.UUID           `json:"tenant_id"`
	OffloadAt         pgtype.Timestamptz    `json:"offload_at"`
	PayloadID         int64                 `json:"payload_id"`
	PayloadInsertedAt pgtype.Timestamptz    `json:"payload_inserted_at"`
	PayloadType       V1PayloadType         `json:"payload_type"`
	Operation         V1PayloadWalOperation `json:"operation"`
	Location          V1PayloadLocation     `json:"location"`
	InlineContent     []byte                `json:"inline_content"`
}

// PollPayloadWALForRecordsToReplicate returns the WAL records claimed by one
// poll of the given partition.
func (q *Queries) PollPayloadWALForRecordsToReplicate(ctx context.Context, db DBTX, arg PollPayloadWALForRecordsToReplicateParams) ([]*PollPayloadWALForRecordsToReplicateRow, error) {
	rows, err := db.Query(ctx, pollPayloadWALForRecordsToReplicate, arg.Partitionnumber, arg.Polllimit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var records []*PollPayloadWALForRecordsToReplicateRow
	for rows.Next() {
		record := new(PollPayloadWALForRecordsToReplicateRow)
		if err := rows.Scan(
			&record.TenantID,
			&record.OffloadAt,
			&record.PayloadID,
			&record.PayloadInsertedAt,
			&record.PayloadType,
			&record.Operation,
			&record.Location,
			&record.InlineContent,
		); err != nil {
			return nil, err
		}
		records = append(records, record)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return records, nil
}
// readPayloads fetches payload rows by their full primary-key tuples; the four
// parallel input arrays are zipped element-wise by UNNEST.
const readPayloads = `-- name: ReadPayloads :many
WITH inputs AS (
SELECT
UNNEST($1::BIGINT[]) AS id,
UNNEST($2::TIMESTAMPTZ[]) AS inserted_at,
UNNEST($3::UUID[]) AS tenant_id,
UNNEST(CAST($4::TEXT[] AS v1_payload_type[])) AS type
)
SELECT tenant_id, id, inserted_at, external_id, type, location, external_location_key, inline_content, updated_at
FROM v1_payload
WHERE (tenant_id, id, inserted_at, type) IN (
SELECT tenant_id, id, inserted_at, type
FROM inputs
)
`

// ReadPayloadsParams carries the parallel key arrays for ReadPayloads; all four
// slices must be the same length.
type ReadPayloadsParams struct {
	Ids         []int64              `json:"ids"`
	Insertedats []pgtype.Timestamptz `json:"insertedats"`
	Tenantids   []pgtype.UUID        `json:"tenantids"`
	Types       []string             `json:"types"`
}

// ReadPayloads returns the payload rows matching the given key tuples.
func (q *Queries) ReadPayloads(ctx context.Context, db DBTX, arg ReadPayloadsParams) ([]*V1Payload, error) {
	rows, err := db.Query(ctx, readPayloads,
		arg.Ids,
		arg.Insertedats,
		arg.Tenantids,
		arg.Types,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var payloads []*V1Payload
	for rows.Next() {
		payload := new(V1Payload)
		if err := rows.Scan(
			&payload.TenantID,
			&payload.ID,
			&payload.InsertedAt,
			&payload.ExternalID,
			&payload.Type,
			&payload.Location,
			&payload.ExternalLocationKey,
			&payload.InlineContent,
			&payload.UpdatedAt,
		); err != nil {
			return nil, err
		}
		payloads = append(payloads, payload)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return payloads, nil
}
// setPayloadExternalKeys records the external storage location for a batch of
// payloads in one statement: it stamps external_location_key on the matched
// v1_payload rows, enqueues a cutover item per input (ON CONFLICT DO NOTHING),
// deletes the corresponding WAL records, and returns the updated payload rows.
const setPayloadExternalKeys = `-- name: SetPayloadExternalKeys :many
WITH inputs AS (
SELECT
UNNEST($1::BIGINT[]) AS id,
UNNEST($2::TIMESTAMPTZ[]) AS inserted_at,
UNNEST(CAST($3::TEXT[] AS v1_payload_type[])) AS type,
UNNEST($4::TIMESTAMPTZ[]) AS offload_at,
UNNEST($5::TEXT[]) AS external_location_key,
UNNEST($6::UUID[]) AS tenant_id
), payload_updates AS (
UPDATE v1_payload
SET
external_location_key = i.external_location_key,
updated_at = NOW()
FROM inputs i
WHERE
v1_payload.id = i.id
AND v1_payload.inserted_at = i.inserted_at
AND v1_payload.tenant_id = i.tenant_id
RETURNING v1_payload.tenant_id, v1_payload.id, v1_payload.inserted_at, v1_payload.external_id, v1_payload.type, v1_payload.location, v1_payload.external_location_key, v1_payload.inline_content, v1_payload.updated_at
), cutover_queue_items AS (
INSERT INTO v1_payload_cutover_queue_item (
tenant_id,
cut_over_at,
payload_id,
payload_inserted_at,
payload_type
)
SELECT
i.tenant_id,
i.offload_at,
i.id,
i.inserted_at,
i.type
FROM
inputs i
ON CONFLICT DO NOTHING
), deletions AS (
DELETE FROM v1_payload_wal
WHERE
(offload_at, payload_id, payload_inserted_at, payload_type, tenant_id) IN (
SELECT offload_at, id, inserted_at, type, tenant_id
FROM inputs
)
)
SELECT tenant_id, id, inserted_at, external_id, type, location, external_location_key, inline_content, updated_at
FROM payload_updates
`

// SetPayloadExternalKeysParams carries the parallel input arrays for
// SetPayloadExternalKeys; all six slices must be the same length.
type SetPayloadExternalKeysParams struct {
	Ids                  []int64              `json:"ids"`
	Insertedats          []pgtype.Timestamptz `json:"insertedats"`
	Payloadtypes         []string             `json:"payloadtypes"`
	Offloadats           []pgtype.Timestamptz `json:"offloadats"`
	Externallocationkeys []string             `json:"externallocationkeys"`
	Tenantids            []pgtype.UUID        `json:"tenantids"`
}

// SetPayloadExternalKeysRow is one payload row as returned by the
// payload_updates CTE after its external_location_key was stamped.
type SetPayloadExternalKeysRow struct {
	TenantID            pgtype.UUID        `json:"tenant_id"`
	ID                  int64              `json:"id"`
	InsertedAt          pgtype.Timestamptz `json:"inserted_at"`
	ExternalID          pgtype.UUID        `json:"external_id"`
	Type                V1PayloadType      `json:"type"`
	Location            V1PayloadLocation  `json:"location"`
	ExternalLocationKey pgtype.Text        `json:"external_location_key"`
	InlineContent       []byte             `json:"inline_content"`
	UpdatedAt           pgtype.Timestamptz `json:"updated_at"`
}

// SetPayloadExternalKeys applies the batch update above and returns the
// payload rows that were actually updated.
func (q *Queries) SetPayloadExternalKeys(ctx context.Context, db DBTX, arg SetPayloadExternalKeysParams) ([]*SetPayloadExternalKeysRow, error) {
	rows, err := db.Query(ctx, setPayloadExternalKeys,
		arg.Ids,
		arg.Insertedats,
		arg.Payloadtypes,
		arg.Offloadats,
		arg.Externallocationkeys,
		arg.Tenantids,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var updated []*SetPayloadExternalKeysRow
	for rows.Next() {
		rec := new(SetPayloadExternalKeysRow)
		if err := rows.Scan(
			&rec.TenantID,
			&rec.ID,
			&rec.InsertedAt,
			&rec.ExternalID,
			&rec.Type,
			&rec.Location,
			&rec.ExternalLocationKey,
			&rec.InlineContent,
			&rec.UpdatedAt,
		); err != nil {
			return nil, err
		}
		updated = append(updated, rec)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return updated, nil
}
const swapV1PayloadPartitionWithTemp = `-- name: SwapV1PayloadPartitionWithTemp :exec
SELECT swap_v1_payload_partition_with_temp($1::DATE)
`

// SwapV1PayloadPartitionWithTemp invokes the swap_v1_payload_partition_with_temp
// database function for the given partition date.
func (q *Queries) SwapV1PayloadPartitionWithTemp(ctx context.Context, db DBTX, date pgtype.Date) error {
	if _, err := db.Exec(ctx, swapV1PayloadPartitionWithTemp, date); err != nil {
		return err
	}
	return nil
}
// writePayloadWAL batch-inserts replication WAL records, one per element of the
// zipped input arrays; duplicates are ignored via ON CONFLICT DO NOTHING.
const writePayloadWAL = `-- name: WritePayloadWAL :exec
WITH inputs AS (
SELECT
UNNEST($1::BIGINT[]) AS payload_id,
UNNEST($2::TIMESTAMPTZ[]) AS payload_inserted_at,
UNNEST(CAST($3::TEXT[] AS v1_payload_type[])) AS payload_type,
UNNEST($4::TIMESTAMPTZ[]) AS offload_at,
UNNEST($5::UUID[]) AS tenant_id
)
INSERT INTO v1_payload_wal (
tenant_id,
offload_at,
payload_id,
payload_inserted_at,
payload_type
)
SELECT
i.tenant_id,
i.offload_at,
i.payload_id,
i.payload_inserted_at,
i.payload_type
FROM
inputs i
ON CONFLICT DO NOTHING
`

// WritePayloadWALParams carries the parallel input arrays for WritePayloadWAL;
// all five slices must be the same length.
type WritePayloadWALParams struct {
	Payloadids         []int64              `json:"payloadids"`
	Payloadinsertedats []pgtype.Timestamptz `json:"payloadinsertedats"`
	Payloadtypes       []string             `json:"payloadtypes"`
	Offloadats         []pgtype.Timestamptz `json:"offloadats"`
	Tenantids          []pgtype.UUID        `json:"tenantids"`
}

// WritePayloadWAL inserts the given batch of WAL records.
func (q *Queries) WritePayloadWAL(ctx context.Context, db DBTX, arg WritePayloadWALParams) error {
	if _, err := db.Exec(ctx, writePayloadWAL,
		arg.Payloadids,
		arg.Payloadinsertedats,
		arg.Payloadtypes,
		arg.Offloadats,
		arg.Tenantids,
	); err != nil {
		return err
	}
	return nil
}
// writePayloads upserts a batch of payload rows from zipped input arrays.
// An empty external_location_key, or any key on a non-EXTERNAL row, is stored
// as NULL (both on insert and on conflict-update). Inputs are de-duplicated
// with SELECT DISTINCT and inserted in primary-key order.
const writePayloads = `-- name: WritePayloads :exec
WITH inputs AS (
SELECT DISTINCT
UNNEST($1::BIGINT[]) AS id,
UNNEST($2::TIMESTAMPTZ[]) AS inserted_at,
UNNEST($3::UUID[]) AS external_id,
UNNEST(CAST($4::TEXT[] AS v1_payload_type[])) AS type,
UNNEST(CAST($5::TEXT[] AS v1_payload_location[])) AS location,
UNNEST($6::TEXT[]) AS external_location_key,
UNNEST($7::JSONB[]) AS inline_content,
UNNEST($8::UUID[]) AS tenant_id
)
INSERT INTO v1_payload (
tenant_id,
id,
inserted_at,
external_id,
type,
location,
external_location_key,
inline_content
)
SELECT
i.tenant_id,
i.id,
i.inserted_at,
i.external_id,
i.type,
i.location,
CASE WHEN i.external_location_key = '' OR i.location != 'EXTERNAL' THEN NULL ELSE i.external_location_key END,
i.inline_content
FROM
inputs i
ORDER BY i.tenant_id, i.inserted_at, i.id, i.type
ON CONFLICT (tenant_id, id, inserted_at, type)
DO UPDATE SET
location = EXCLUDED.location,
external_location_key = CASE WHEN EXCLUDED.external_location_key = '' OR EXCLUDED.location != 'EXTERNAL' THEN NULL ELSE EXCLUDED.external_location_key END,
inline_content = EXCLUDED.inline_content,
updated_at = NOW()
`

// WritePayloadsParams carries the parallel input arrays for WritePayloads; all
// eight slices must be the same length.
type WritePayloadsParams struct {
	Ids                  []int64              `json:"ids"`
	Insertedats          []pgtype.Timestamptz `json:"insertedats"`
	Externalids          []pgtype.UUID        `json:"externalids"`
	Types                []string             `json:"types"`
	Locations            []string             `json:"locations"`
	Externallocationkeys []string             `json:"externallocationkeys"`
	Inlinecontents       [][]byte             `json:"inlinecontents"`
	Tenantids            []pgtype.UUID        `json:"tenantids"`
}

// WritePayloads upserts the given batch of payload rows.
func (q *Queries) WritePayloads(ctx context.Context, db DBTX, arg WritePayloadsParams) error {
	if _, err := db.Exec(ctx, writePayloads,
		arg.Ids,
		arg.Insertedats,
		arg.Externalids,
		arg.Types,
		arg.Locations,
		arg.Externallocationkeys,
		arg.Inlinecontents,
		arg.Tenantids,
	); err != nil {
		return err
	}
	return nil
}