C LB+unlocklockonceonce+poacquireonce

(*
 * Result: Never
 *
 * If two locked critical sections execute on the same CPU, all accesses
 * in the first must execute before any accesses in the second, even if the
 * critical sections are protected by different locks.  Note: Even when a
 * write executes before a read, their memory effects can be reordered from
 * the viewpoint of another CPU (the kind of reordering allowed by TSO).
 *)
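
(*
 * Reading the name: "LB" is the load-buffering pattern (each process
 * reads the variable that the other writes), "unlocklockonceonce"
 * describes P0 (an unlock followed by a lock of a different lock,
 * separating a READ_ONCE() from a WRITE_ONCE()), and "poacquireonce"
 * describes P1 (program order from an acquire load to a WRITE_ONCE()).
 *)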

{}

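(*
 * The empty initial-state block above means all variables start at
 * zero: x = 0, y = 0, and both spinlocks begin unlocked.
 *)
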
P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
{
	int r1;

	/* First critical section: read x under lock s. */
	spin_lock(s);
	r1 = READ_ONCE(*x);
	spin_unlock(s);
	/* Second critical section: write y under a different lock, t. */
	spin_lock(t);
	WRITE_ONCE(*y, 1);
	spin_unlock(t);
}

P1(int *x, int *y)
{
	int r2;

	/* The acquire load of y orders the later write to x after it. */
	r2 = smp_load_acquire(y);
	WRITE_ONCE(*x, 1);
}

exists (0:r1=1 /\ 1:r2=1)
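
(*
 * A sketch of how to check the "Never" result, assuming the herd7 tool
 * is installed and this file sits in the kernel's tools/memory-model
 * tree (run from that directory):
 *
 *	herd7 -conf linux-kernel.cfg \
 *		litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus
 *
 * herd7 should print an "Observation ... Never 0 N" line, meaning that
 * no execution satisfies the exists clause above.
 *)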