rcu: fix implicit conversion in bit shift
[ upstream commit ffe827f ]

../lib/rcu/rte_rcu_qsbr.c(101): warning C4334: '<<': result of 32-bit
 shift implicitly converted to 64 bits (was 64-bit shift intended?)
../lib/rcu/rte_rcu_qsbr.c(107): warning C4334: '<<': result of 32-bit
 shift implicitly converted to 64 bits (was 64-bit shift intended?)
../lib/rcu/rte_rcu_qsbr.c(145): warning C4334: '<<': result of 32-bit
 shift implicitly converted to 64 bits (was 64-bit shift intended?)

These warnings are issued by the MSVC compiler. Since the result is
stored in a variable of type uint64_t, it makes sense to shift a
64-bit number instead of shifting a 32-bit number and then having the
compiler convert the result implicitly to 64 bits.
UINT64_C was used in the fix as it is the portable way to define a 64-bit
constant (ULL suffix is architecture dependent).

From reading the code this is also a bugfix:
(1 << id), where id = thread_id & 0x3f, was wrong when thread_id > 0x1f.

Signed-off-by: Andre Muezerie <[email protected]>
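
As an illustration of the shift-width point above, here is a minimal
standalone sketch; the bit64() helper and the sample thread_id are made up
for the example (the RTE_BIT64() macro used in the diff expands to the same
kind of 64-bit shift):

/*
 * Standalone sketch (not DPDK code): bit64() is an illustrative stand-in
 * for a 64-bit mask macro such as RTE_BIT64().
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t bit64(unsigned int nr)
{
        /* UINT64_C(1) is a 64-bit constant, so the shift happens in 64 bits
         * and is well defined for nr = 0..63.
         */
        return UINT64_C(1) << nr;
}

int main(void)
{
        unsigned int thread_id = 40;        /* any id above 0x1f shows the problem */
        unsigned int id = thread_id & 0x3f; /* bit index within a 64-bit bitmap */

        /* With "1UL << id", MSVC (where unsigned long is 32 bits) performs a
         * 32-bit shift; for id >= 32 that is undefined and the intended bit
         * is never set.  Shifting a 64-bit constant yields the correct mask.
         */
        uint64_t mask = bit64(id);

        printf("thread_id=%u id=%u mask=0x%016" PRIx64 "\n", thread_id, id, mask);
        return 0;
}

On LP64 platforms such as Linux/x86-64, unsigned long is 64 bits and the old
expression happened to work, which is why the warnings (and the latent bug)
only surfaced with MSVC, whose unsigned long is 32 bits.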
Andre Muezerie authored and steevenlee committed Dec 10, 2024
1 parent aca2600 commit c6222d0
Showing 1 changed file with 8 additions and 8 deletions.

lib/rcu/rte_rcu_qsbr.c
@@ -104,11 +104,11 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
     /* Check if the thread is already registered */
     old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
             rte_memory_order_relaxed);
-    if (old_bmap & 1UL << id)
+    if (old_bmap & RTE_BIT64(id))
         return 0;

     do {
-        new_bmap = old_bmap | (1UL << id);
+        new_bmap = old_bmap | RTE_BIT64(id);
         success = rte_atomic_compare_exchange_strong_explicit(
                 __RTE_QSBR_THRID_ARRAY_ELM(v, i),
                 &old_bmap, new_bmap,
@@ -117,7 +117,7 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
         if (success)
             rte_atomic_fetch_add_explicit(&v->num_threads,
                     1, rte_memory_order_relaxed);
-        else if (old_bmap & (1UL << id))
+        else if (old_bmap & RTE_BIT64(id))
             /* Someone else registered this thread.
              * Counter should not be incremented.
              */
@@ -156,11 +156,11 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
     /* Check if the thread is already unregistered */
     old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
             rte_memory_order_relaxed);
-    if (!(old_bmap & (1UL << id)))
+    if (!(old_bmap & RTE_BIT64(id)))
         return 0;

     do {
-        new_bmap = old_bmap & ~(1UL << id);
+        new_bmap = old_bmap & ~RTE_BIT64(id);
         /* Make sure any loads of the shared data structure are
          * completed before removal of the thread from the list of
          * reporting threads.
@@ -173,7 +173,7 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
         if (success)
             rte_atomic_fetch_sub_explicit(&v->num_threads,
                     1, rte_memory_order_relaxed);
-        else if (!(old_bmap & (1UL << id)))
+        else if (!(old_bmap & RTE_BIT64(id)))
             /* Someone else unregistered this thread.
              * Counter should not be incremented.
              */
@@ -234,7 +234,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
             t = rte_ctz64(bmap);
             fprintf(f, "%u ", id + t);

-            bmap &= ~(1UL << t);
+            bmap &= ~RTE_BIT64(t);
         }
     }

@@ -261,7 +261,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
                 rte_atomic_load_explicit(
                     &v->qsbr_cnt[id + t].lock_cnt,
                     rte_memory_order_relaxed));
-            bmap &= ~(1UL << t);
+            bmap &= ~RTE_BIT64(t);
         }
     }
