From 8b4f41ce21cc72ce77156ec838781fb21bdb1ff0 Mon Sep 17 00:00:00 2001 From: Brian Carlson <brian.m.carlson@gmail.com> Date: Wed, 4 Mar 2026 17:55:10 -0600 Subject: [PATCH 1/7] Add docs for onConnect --- docs/pages/apis/pool.mdx | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/pages/apis/pool.mdx b/docs/pages/apis/pool.mdx index fbe0279e1..98319a3f0 100644 --- a/docs/pages/apis/pool.mdx +++ b/docs/pages/apis/pool.mdx @@ -56,6 +56,13 @@ type Config = { // regardless of whether they are idle. It's useful to force rotation of connection pools through // middleware so that you can rotate the underlying servers. The default is disabled (value of zero) maxLifetimeSeconds?: number + + // Called once when a new client is created, before it is made available to the pool. + // The client is fully connected and queryable at this point. + // Can be a regular function or an async function. + // If the function throws or returns a promise that rejects, the client is destroyed + // and the error is returned to the caller requesting the connection. + onConnect?: (client: Client) => void | Promise<void> } ``` @@ -74,6 +81,18 @@ const pool = new Pool({ }) ``` +Example using `onConnect` to run setup commands on each new client: + +```js +import { Pool } from 'pg' + +const pool = new Pool({ + onConnect: async (client) => { + await client.query('SET search_path TO my_schema') + } +}) +``` + ## pool.query Often we only need to run a single query on the database, so as convenience the pool has a method to run a query on the first available idle client and return its result. 
From 9cd341b861a3e893f5e888cc2100d58e6e306138 Mon Sep 17 00:00:00 2001 From: Brian Carlson Date: Wed, 4 Mar 2026 17:57:43 -0600 Subject: [PATCH 2/7] Add docs for max uses --- docs/pages/apis/pool.mdx | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/pages/apis/pool.mdx b/docs/pages/apis/pool.mdx index 98319a3f0..4707ac852 100644 --- a/docs/pages/apis/pool.mdx +++ b/docs/pages/apis/pool.mdx @@ -51,6 +51,12 @@ type Config = { // where you don't want to wait for your clients to go idle before your process exits. allowExitOnIdle?: boolean + // number of times a client can be checked out from the pool before it is + // disconnected and a new client is created in its place. + // the default is Infinity which means a client will never be automatically destroyed outside of other lifecycle things + // like manually removing it, it timing out due to idelness, etc... + maxUses?: number + // Sets a max overall life for the connection. // A value of 60 would evict connections that have been around for over 60 seconds, // regardless of whether they are idle. 
It's useful to force rotation of connection pools through @@ -77,7 +83,7 @@ const pool = new Pool({ max: 20, idleTimeoutMillis: 30000, connectionTimeoutMillis: 2000, - maxLifetimeSeconds: 60 + maxLifetimeSeconds: 60, }) ``` @@ -89,7 +95,7 @@ import { Pool } from 'pg' const pool = new Pool({ onConnect: async (client) => { await client.query('SET search_path TO my_schema') - } + }, }) ``` From dc8d9eab0b27c53168d430f238934a671b749713 Mon Sep 17 00:00:00 2001 From: Brian Carlson Date: Wed, 4 Mar 2026 18:02:53 -0600 Subject: [PATCH 3/7] Clean up casing and grammar in comments --- docs/pages/apis/pool.mdx | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/pages/apis/pool.mdx b/docs/pages/apis/pool.mdx index 4707ac852..d9323979e 100644 --- a/docs/pages/apis/pool.mdx +++ b/docs/pages/apis/pool.mdx @@ -16,28 +16,28 @@ The pool is initially created empty and will create new clients lazily as they a ```ts type Config = { - // all valid client config options are also valid here - // in addition here are the pool specific configuration parameters: + // All valid client config options are also valid here. + // In addition here are the pool specific configuration parameters: - // number of milliseconds to wait before timing out when connecting a new client - // by default this is 0 which means no timeout + // Number of milliseconds to wait before timing out when connecting a new client. + // By default this is 0 which means no timeout. connectionTimeoutMillis?: number - // number of milliseconds a client must sit idle in the pool and not be checked out - // before it is disconnected from the backend and discarded - // default is 10000 (10 seconds) - set to 0 to disable auto-disconnection of idle clients + // Number of milliseconds a client must sit idle in the pool and not be checked out + // before it is disconnected from the backend and discarded. 
+ // Default is 10000 (10 seconds) - set to 0 to disable auto-disconnection of idle clients. idleTimeoutMillis?: number - // maximum number of clients the pool should contain - // by default this is set to 10. There is some nuance to setting the maximum size of your pool. - // see https://node-postgres.com/guides/pool-sizing for more information + // Maximum number of clients the pool should contain. + // By default this is set to 10. There is some nuance to setting the maximum size of your pool. + // See https://node-postgres.com/guides/pool-sizing for more information. max?: number - // minimum number of clients the pool should hold on to and _not_ destroy with the idleTimeoutMillis - // this can be useful if you get very bursty traffic and want to keep a few clients around. - // note: current the pool will not automatically create and connect new clients up to the min, it will + // Minimum number of clients the pool should hold on to and _not_ destroy with the idleTimeoutMillis. + // This can be useful if you get very bursty traffic and want to keep a few clients around. + // Note: currently the pool will not automatically create and connect new clients up to the min, it will // only not evict and close clients except those which exceed the min count. - // the default is 0 which disables this behavior. + // The default is 0 which disables this behavior. min?: number // Default behavior is the pool will keep clients open & connected to the backend @@ -47,20 +47,20 @@ type Config = { // // Setting `allowExitOnIdle: true` in the config will allow the node event loop to exit // as soon as all clients in the pool are idle, even if their socket is still open - // to the postgres server. This can be handy in scripts & tests + // to the postgres server. This can be handy in scripts & tests // where you don't want to wait for your clients to go idle before your process exits. 
allowExitOnIdle?: boolean - // number of times a client can be checked out from the pool before it is + // Number of times a client can be checked out from the pool before it is // disconnected and a new client is created in its place. - // the default is Infinity which means a client will never be automatically destroyed outside of other lifecycle things - // like manually removing it, it timing out due to idelness, etc... + // The default is Infinity which means a client will never be automatically destroyed + // outside of other lifecycle events like manually removing it, it timing out due to idleness, etc. maxUses?: number // Sets a max overall life for the connection. // A value of 60 would evict connections that have been around for over 60 seconds, // regardless of whether they are idle. It's useful to force rotation of connection pools through - // middleware so that you can rotate the underlying servers. The default is disabled (value of zero) + // middleware so that you can rotate the underlying servers. The default is disabled (value of zero). maxLifetimeSeconds?: number // Called once when a new client is created, before it is made available to the pool. From 01ba340bbc047119a50940408030dacdb2a62e8f Mon Sep 17 00:00:00 2001 From: Brian Carlson Date: Wed, 4 Mar 2026 18:14:49 -0600 Subject: [PATCH 4/7] Add more docs on pool sizing --- docs/pages/guides/pool-sizing.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/pages/guides/pool-sizing.md b/docs/pages/guides/pool-sizing.md index 5c7ddaad8..dc514ca2d 100644 --- a/docs/pages/guides/pool-sizing.md +++ b/docs/pages/guides/pool-sizing.md @@ -16,6 +16,14 @@ In this situation, I'd probably set the `max` to 20 or 25. This lets you have pl If the number of instances of your services which connect to your database is more dynamic and based on things like load, auto-scaling containers, or running in cloud-functions, you need to be a bit more thoughtful about what your max might be. 
Often in these environments, there will be another database pooling proxy in front of the database like pg-bouncer or the RDS-proxy, etc. I'm not sure how all these function exactly, and they all have some trade-offs, but let's assume you're not using a proxy. Then I'd be pretty cautious about how large you set any individual pool. If you're running an application under pretty serious load where you need dynamic scaling or lots of lambdas spinning up and sending queries, your queries are likely fast and you should be fine setting the `max` to a low value like 10 -- or just leave it alone, since `10` is the default. +### Vercel + +If you're running on Vercel with [fluid compute](https://vercel.com/kb/guide/efficiently-manage-database-connection-pools-with-fluid-compute), your serverless functions can handle multiple requests concurrently and stick around between invocations. In this case, you can treat it similarly to a traditional long-lived process and use a default-ish pool size of `10`. The pool will stay warm across requests and you'll get the benefits of connection reuse. You'll probably need to put pgBouncer (or some kinda pooler like what is offered w/ supabase, rds, gcp, etc) in front of your database as vercel worker count can grow quite a bit larger than the number of reasonable max connections postgres can handle. + +### Cloudflare workers + +In a fully stateless serverless environment like cloudflare workers where your worker is killed, suspended, moved to a new compute node, or shut down at the end of every request, you'll still probably be okay with a pool size `max` of `10` though you can lower it if you start hitting connection exhaustion limits on your pooler. In cloudflare the pooler is hyperdrive and in my experience it works fantastically at pooling w/ their workers setup. Make sure at the end of your serverless handler, after everything is done, you close the pool and dispose of the pool by calling `pool.end()`. 
Setting the pool to a size larger than 1 is still recommeded as things like tRPC and other server-side routing & request batching code could result in multiple independent queries executing at the same time. With a pool size of `1` you are turning what is "a few things at once" into all things waiting in line one after another on the one available client in the pool. + ## pg-bouncer, RDS-proxy, etc. I'm not sure of all the pooling services for Postgres. I haven't used any myself. Throughout the years of working on `pg`, I've addressed issues caused by various proxies behaving differently than an actual Postgres backend. There are also gotchas with things like transactions. On the other hand, plenty of people run these with much success. In this situation, I would just recommend using some small but reasonable `max` value like the default value of `10` as it can still be helpful to keep a few TCP sockets from your services to the Postgres proxy open. @@ -23,3 +31,7 @@ I'm not sure of all the pooling services for Postgres. I haven't used any myself ## Conclusion, tl;dr It's a bit of a complicated topic and doesn't have much impact on things until you need to start scaling. At that point, your number of connections _still_ probably won't be your scaling bottleneck. It's worth thinking about a bit, but mostly I'd just leave the pool size to the default of `10` until you run into troubles: hopefully you never do! + +## Need help? + +In my career this has been the most error-prone thing related to running postgres & node. Particularly with the differences in various serverless providers (Cloudflare, Vercel, Lamda, etc...) versus a more traditional hosting. If you have any questions or need help please don't hesitate to email me at [brian.m.carlson@gmail.com](mailto:brian.m.carlson@gmail.com]) or reach out on GitHub. 
From dbbcb1d879bda1544b6e2024c41faf9ea4c316bf Mon Sep 17 00:00:00 2001 From: Brian Carlson Date: Wed, 4 Mar 2026 18:16:35 -0600 Subject: [PATCH 5/7] Grammar --- docs/pages/guides/pool-sizing.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/pages/guides/pool-sizing.md b/docs/pages/guides/pool-sizing.md index dc514ca2d..430e69190 100644 --- a/docs/pages/guides/pool-sizing.md +++ b/docs/pages/guides/pool-sizing.md @@ -18,11 +18,11 @@ If the number of instances of your services which connect to your database is mo ### Vercel -If you're running on Vercel with [fluid compute](https://vercel.com/kb/guide/efficiently-manage-database-connection-pools-with-fluid-compute), your serverless functions can handle multiple requests concurrently and stick around between invocations. In this case, you can treat it similarly to a traditional long-lived process and use a default-ish pool size of `10`. The pool will stay warm across requests and you'll get the benefits of connection reuse. You'll probably need to put pgBouncer (or some kinda pooler like what is offered w/ supabase, rds, gcp, etc) in front of your database as vercel worker count can grow quite a bit larger than the number of reasonable max connections postgres can handle. +If you're running on Vercel with [fluid compute](https://vercel.com/kb/guide/efficiently-manage-database-connection-pools-with-fluid-compute), your serverless functions can handle multiple requests concurrently and stick around between invocations. In this case, you can treat it similarly to a traditional long-lived process and use a default-ish pool size of `10`. The pool will stay warm across requests and you'll get the benefits of connection reuse. You'll probably need to put pgBouncer (or some kind of pooler like what is offered with Supabase, RDS, GCP, etc.) in front of your database, as Vercel worker count can grow quite a bit larger than the number of reasonable max connections Postgres can handle. 
### Cloudflare workers -In a fully stateless serverless environment like cloudflare workers where your worker is killed, suspended, moved to a new compute node, or shut down at the end of every request, you'll still probably be okay with a pool size `max` of `10` though you can lower it if you start hitting connection exhaustion limits on your pooler. In cloudflare the pooler is hyperdrive and in my experience it works fantastically at pooling w/ their workers setup. Make sure at the end of your serverless handler, after everything is done, you close the pool and dispose of the pool by calling `pool.end()`. Setting the pool to a size larger than 1 is still recommeded as things like tRPC and other server-side routing & request batching code could result in multiple independent queries executing at the same time. With a pool size of `1` you are turning what is "a few things at once" into all things waiting in line one after another on the one available client in the pool. +In a fully stateless serverless environment like Cloudflare Workers where your worker is killed, suspended, moved to a new compute node, or shut down at the end of every request, you'll still probably be okay with a pool size `max` of `10`, though you can lower it if you start hitting connection exhaustion limits on your pooler. In Cloudflare the pooler is Hyperdrive, and in my experience it works fantastically with their workers setup. Make sure at the end of your serverless handler, after everything is done, you close and dispose of the pool by calling `pool.end()`. Setting the pool to a size larger than 1 is still recommended, as things like tRPC and other server-side routing & request batching code could result in multiple independent queries executing at the same time. With a pool size of `1` you are turning what is "a few things at once" into all things waiting in line one after another on the one available client in the pool. ## pg-bouncer, RDS-proxy, etc. 
@@ -34,4 +34,4 @@ It's a bit of a complicated topic and doesn't have much impact on things until y ## Need help? -In my career this has been the most error-prone thing related to running postgres & node. Particularly with the differences in various serverless providers (Cloudflare, Vercel, Lamda, etc...) versus a more traditional hosting. If you have any questions or need help please don't hesitate to email me at [brian.m.carlson@gmail.com](mailto:brian.m.carlson@gmail.com]) or reach out on GitHub. +In my career, this has been the most error-prone thing related to running Postgres & Node, particularly with the differences in various serverless providers (Cloudflare, Vercel, Lambda, etc.) versus more traditional hosting. If you have any questions or need help, please don't hesitate to email me at [brian.m.carlson@gmail.com](mailto:brian.m.carlson@gmail.com) or reach out on GitHub. From dbdb4b83d8d69393a22973a376e4aaa90d92ee9a Mon Sep 17 00:00:00 2001 From: Brian Carlson Date: Wed, 4 Mar 2026 18:35:47 -0600 Subject: [PATCH 6/7] Add better footer --- docs/theme.config.js | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/docs/theme.config.js b/docs/theme.config.js index 29f115cb0..f59cafa5e 100644 --- a/docs/theme.config.js +++ b/docs/theme.config.js @@ -15,7 +15,20 @@ export default { next: true, }, footer: { - text: `MIT ${new Date().getFullYear()} © Brian Carlson.`, + content: ( + + As of 2026-03-01 I am taking a break from the workforce to focus entirely on this project! Please consider{' '} + + sponsoring this work on GitHub + + ! 
+ + ), }, editLink: { text: 'Edit this page on GitHub', From b0ba97045970247920444d4b45b3c729d36c0bfb Mon Sep 17 00:00:00 2001 From: Brian Carlson Date: Wed, 4 Mar 2026 18:43:41 -0600 Subject: [PATCH 7/7] Final updates --- docs/pages/features/queries.mdx | 6 +++--- docs/theme.config.js | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/docs/pages/features/queries.mdx b/docs/pages/features/queries.mdx index 39bcfbe1d..63ecdde1e 100644 --- a/docs/pages/features/queries.mdx +++ b/docs/pages/features/queries.mdx @@ -26,7 +26,7 @@ console.log(res.rows[0]) // { name: 'brianc', email: 'brian.m.carlson@gmail.com' } ``` -
+
PostgreSQL does not support parameters for identifiers. If you need to have dynamic database, schema, table, or column names (e.g. in DDL statements) use [pg-format](https://www.npmjs.com/package/pg-format) package for handling escaping these values to ensure you do not have SQL injection!
@@ -99,8 +99,8 @@ console.log(res.rows[0]) In the above example the first time the client sees a query with the name `'fetch-user'` it will send a 'parse' request to the PostgreSQL server & execute the query as normal. The second time, it will skip the 'parse' request and send the _name_ of the query to the PostgreSQL server. -
-
+
+
Be careful not to fall into the trap of premature optimization. Most of your queries will likely not benefit much, if at all, from using prepared statements. This is a somewhat "power user" feature of PostgreSQL that is best used when you know how to use it - namely with very complex queries with lots of joins and advanced operations like union and switch statements. I rarely use this feature in my own apps unless writing complex aggregate queries for reports and I know the reports are going to be executed very frequently.
diff --git a/docs/theme.config.js b/docs/theme.config.js index f59cafa5e..03ba3665c 100644 --- a/docs/theme.config.js +++ b/docs/theme.config.js @@ -68,6 +68,21 @@ l-161 -22 -94 41 c-201 87 -327 113 -533 112 -77 -1 -166 -7 -196 -13z m-89 chat: { link: 'https://discord.gg/2afXp5vUWm', }, + navbar: { + extraContent: ( + + + + + + ), + }, head: ( <>