<cfscript>

	// Distributed locks can prevent different servers from stepping on each other's
	// processing workflows. However, in a horizontally-scaled system, pods can die at
	// any time (ex: the pod might crash, Kubernetes might suddenly reschedule the pod
	// onto a different node, or AWS might reclaim a Spot Instance). As such, an
	// acquired lock may be orphaned unexpectedly, leaving the lock HELD for an
	// unnecessarily long period of time. For long-lived locks, this can pose a problem
	// because it leaves the system in an unmanaged state. To cope with this, we can
	// create a lock with a shorter TTL and then use a BACKGROUND THREAD to keep pushing
	// that TTL out into the future. This way, if the process dies unexpectedly, the
	// underlying Redis key will be expunged shortly thereafter. (A hypothetical sketch
	// of this approach is included at the bottom of this snippet.)
	synchronizeAcrossNodes(
		"synchronized-processing-lock",
		() => {

			// Simulate some "work" inside the distributed lock.
			sleep( 20 * 1000 );

			echo( "Woot! I can haz success! Lock will be released!" );

		}
	);
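
	// CAUTION: The DistributedLock acquisition inside synchronizeAcrossNodes() throws
	// an error if the lock cannot be obtained (ie, if another node currently holds it),
	// and that error propagates up to the caller. The following is a sketch of how a
	// caller might tolerate that contention. The specific error type is not shown in
	// this snippet, so we catch broadly:
	try {

		synchronizeAcrossNodes(
			"synchronized-processing-lock",
			() => {

				echo( "I only run if the lock was available." );

			}
		);

	} catch ( any error ) {

		echo( "Lock was not available - skipping this processing run." );

	}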

	// ------------------------------------------------------------------------------- //
	// ------------------------------------------------------------------------------- //

	/**
	* I automatically wrap a distributed lock around the given operator. Any value
	* returned from the operator is passed back to the calling context.
	* 
	* @lockName I am the name of the distributed lock.
	* @lockOperator I am the operator to synchronize.
	*/
	public any function synchronizeAcrossNodes(
		required string lockName,
		required function lockOperator
		) {

		// CAUTION: Throws an error if the lock cannot be obtained.
		var distributedLock = new DistributedLock( application.redisPool, lockName )
			.get()
		;

		try {

			return( lockOperator() );

		} finally {

			distributedLock.release();

		}

	}
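
	// ------------------------------------------------------------------------------- //
	// ------------------------------------------------------------------------------- //

	/**
	* CAUTION: The DistributedLock component used above is NOT included in this snippet.
	* The following function is a minimal, HYPOTHETICAL sketch of the "short TTL plus
	* background heartbeat" approach described in the opening comment - it is not the
	* actual DistributedLock implementation. It assumes that "application.redisPool" is
	* a Jedis connection pool (JedisPool); the function name and arguments are
	* illustrative only. A production implementation would also set the value and the
	* TTL atomically (SET key value NX EX ttl) rather than calling SETNX and EXPIRE
	* separately.
	* 
	* @lockKey I am the Redis key backing the lock.
	* @ttlInSeconds I am the short TTL applied to the lock key.
	*/
	public void function acquireShortLivedLock(
		required string lockKey,
		required numeric ttlInSeconds
		) {

		var redis = application.redisPool.getResource();

		try {

			// SETNX only succeeds if the key does not already exist - ie, if no other
			// node currently holds the lock.
			if ( ! redis.setnx( lockKey, "locked" ) ) {

				throw(
					type = "DistributedLock.NotAvailable",
					message = "Could not obtain distributed lock."
				);

			}

			// Apply the SHORT TTL so that an orphaned lock expires quickly.
			redis.expire( lockKey, ttlInSeconds );

		} finally {

			redis.close();

		}

		// Spawn the background HEARTBEAT thread. As long as the lock key still exists,
		// keep pushing its TTL out into the future. Releasing the lock (DEL lockKey) -
		// or having the key expire because this process died - ends the loop.
		thread name="lock-heartbeat-#createUuid()#" action="run" lockKey=lockKey ttlInSeconds=ttlInSeconds {

			do {

				// Wake up halfway through the TTL window and extend it.
				sleep( attributes.ttlInSeconds * 500 );

				heartbeatRedis = application.redisPool.getResource();

				try {

					// EXPIRE returns 0 if the key no longer exists, which ends the loop.
					isStillLocked = heartbeatRedis.expire(
						attributes.lockKey,
						attributes.ttlInSeconds
					);

				} finally {

					heartbeatRedis.close();

				}

			} while ( isStillLocked );

		}

	}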

</cfscript>