From a77798d9125e799d3a2cdc3e7a514fb869219ed7 Mon Sep 17 00:00:00 2001
From: Andres Freund <andres@anarazel.de>
Date: Mon, 8 Jun 2020 16:36:51 -0700
Subject: [PATCH v1 3/4] Add basic spinlock tests to regression tests.

s_lock_test, the already existing test for spinlocks, isn't run in an
automated fashion, nor does it exercise a normal backend environment.
Adding tests that are run as part of a normal regression run therefore
seems like a good idea.

Currently the new tests are run as part of the pre-existing
test_atomic_ops() test. That can probably be quibbled about.

The only operations that s_lock_test covers but the new tests don't
are the detection of a stuck spinlock and S_LOCK_FREE (which is
otherwise unused).

This currently contains a test initializing more than INT_MAX
spinlocks (only run with --disable-spinlocks), to verify that the
previous commit's fix for a bug triggered by more than INT_MAX
spinlock initializations is correct. That test might be too slow to
enable generally.
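
For context, the bug the wraparound test guards against is roughly the
following pattern in the semaphore-based spinlock emulation (a sketch
using assumed names, not the actual code):

    #define NUM_SPINLOCK_SEMAPHORES 128	/* value from pg_config_manual.h */

    static int counter = 0;	/* signed: overflows after INT_MAX inits */

    static void
    init_lock_sketch(volatile int *lock)
    {
        /*
         * Once the counter overflows (undefined behavior in C, wrapping
         * to a negative value on common platforms), the modulo result
         * can be negative too, yielding an out-of-range semaphore index.
         * An unsigned counter avoids that.
         */
        *lock = ++counter % NUM_SPINLOCK_SEMAPHORES;
    }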

It might be worth retiring s_lock_test after this; its additional
coverage of stuck-spinlock detection probably isn't worth the added
complexity?

Author: Andres Freund
Reviewed-By:
Discussion: https://postgr.es/m/
Backpatch:
---
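[Reviewer note: the EXPECT_EQ_U32() checks below rely on a helper
macro defined earlier in regress.c, approximately (paraphrased, not
copied verbatim):

	#define EXPECT_EQ_U32(result_expr, expected_expr)	\
		do { \
			uint32		result = (result_expr); \
			uint32		expected = (expected_expr); \
			if (result != expected) \
				elog(ERROR, \
					 "%s yielded %u, expected %s, in file \"%s\" line %u", \
					 #result_expr, result, #expected_expr, \
					 __FILE__, __LINE__); \
		} while (0)

On a mismatch the test thus aborts with an elog(ERROR) naming the
offending expression.]
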
 src/test/regress/regress.c | 93 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 93 insertions(+)

diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index 960c155e5f2..a48f9de2532 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -794,6 +794,96 @@ test_atomic_uint64(void)
 	EXPECT_EQ_U64(pg_atomic_fetch_and_u64(&var, ~0), 0);
 }
 
+static void
+test_spinlock(void)
+{
+	{
+		struct test_lock_struct
+		{
+			uint32		data_before;
+			slock_t		lock;
+			uint32		data_after;
+		} struct_w_lock;
+
+		struct_w_lock.data_before = 0x44;
+		struct_w_lock.data_after = 0x17;
+
+		/* test basic operations via the SpinLock* API */
+		SpinLockInit(&struct_w_lock.lock);
+		SpinLockAcquire(&struct_w_lock.lock);
+		SpinLockRelease(&struct_w_lock.lock);
+
+		/* test basic operations via underlying S_* API */
+		S_INIT_LOCK(&struct_w_lock.lock);
+		S_LOCK(&struct_w_lock.lock);
+		S_UNLOCK(&struct_w_lock.lock);
+
+		/* and that "contended" acquisition works */
+		s_lock(&struct_w_lock.lock, "testfile", 17, "testfunc");
+		S_UNLOCK(&struct_w_lock.lock);
+
+		/*
+		 * Check, using TAS directly, that a single spin cycle doesn't block
+		 * when trying to acquire an already held lock.
+		 */
+#ifdef TAS
+		S_LOCK(&struct_w_lock.lock);
+		if (!TAS(&struct_w_lock.lock))
+			elog(ERROR, "acquired already held spinlock");
+
+#ifdef TAS_SPIN
+		if (!TAS_SPIN(&struct_w_lock.lock))
+			elog(ERROR, "acquired already held spinlock");
+#endif							/* defined(TAS_SPIN) */
+
+		S_UNLOCK(&struct_w_lock.lock);
+#endif							/* defined(TAS) */
+
+		/*
+		 * Verify that after all of this the non-lock contents are still
+		 * correct.
+		 */
+		EXPECT_EQ_U32(struct_w_lock.data_before, 0x44);
+		EXPECT_EQ_U32(struct_w_lock.data_after, 0x17);
+	}
+
+	/*
+	 * Ensure that initializing more than INT32_MAX simulated spinlocks
+	 * works. This is probably too expensive to run in each regression run.
+	 */
+#ifndef HAVE_SPINLOCKS
+	{
+		/*
+		 * Initialize enough spinlocks to advance the counter close to
+		 * wraparound. It's too expensive to perform acquire/release for each,
+		 * as those may be syscalls when the spinlock emulation is used (and
+		 * even just atomic TAS would be expensive).
+		 */
+		for (uint32 i = 0; i < INT32_MAX - 100000; i++)
+		{
+			slock_t lock;
+
+			SpinLockInit(&lock);
+		}
+
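+		/*
+		 * These initializations cross the INT32_MAX boundary; acquire and
+		 * release each lock to verify it still maps onto a valid semaphore.
+		 */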
+		for (uint32 i = 0; i < 200000; i++)
+		{
+			slock_t lock;
+
+			SpinLockInit(&lock);
+
+			SpinLockAcquire(&lock);
+			SpinLockRelease(&lock);
+			SpinLockAcquire(&lock);
+			SpinLockRelease(&lock);
+		}
+	}
+#endif
+}
 
 PG_FUNCTION_INFO_V1(test_atomic_ops);
 Datum
@@ -805,6 +891,9 @@ test_atomic_ops(PG_FUNCTION_ARGS)
 
 	test_atomic_uint64();
 
+	/* XXX: Is there a better location for this? */
+	test_spinlock();
+
 	PG_RETURN_BOOL(true);
 }
 
-- 
2.25.0.114.g5b0ca878e0

