From: Heiko Stuebner <heiko.stuebner@...>
Date: Wed, 8 Feb 2023 23:38:31 +0100
Apply the suggestions for the assembly string functions that Andrew
made but that I didn't manage to include in the series that got
applied.
This includes improvements to two comments, removal of unneeded labels
and moving one instruction slightly higher so that it no longer
contradicts an explanatory comment.
Suggested-by: Andrew Jones <ajones@...>
Signed-off-by: Heiko Stuebner <heiko.stuebner@...>
---
arch/riscv/lib/strcmp.S | 6 ++++--
arch/riscv/lib/strlen.S | 10 +++++-----
arch/riscv/lib/strncmp.S | 16 +++++++---------
3 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/riscv/lib/strcmp.S b/arch/riscv/lib/strcmp.S
--- a/arch/riscv/lib/strcmp.S
+++ b/arch/riscv/lib/strcmp.S
@@ -40,7 +40,9 @@ SYM_FUNC_START(strcmp)
ret
/*
- * Variant of strcmp using the ZBB extension if available
+ * Variant of strcmp using the ZBB extension if available.
+ * The code was published as part of the bitmanip manual
+ * in Appendix A.
*/
#ifdef CONFIG_RISCV_ISA_ZBB
strcmp_zbb:
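
For context (this note and the sketch below are not part of the patch):
strcmp_zbb finds a NUL a word at a time via the Zbb orc.b instruction,
which maps each nonzero byte of a register to 0xff and each zero byte
to 0x00. A minimal C model of that check, with orc_b() as a
hypothetical stand-in for the instruction:

#include <stdint.h>

/* Models the Zbb orc.b instruction: 0xff per nonzero byte. */
static uint64_t orc_b(uint64_t x)
{
	uint64_t r = 0;

	for (int i = 0; i < 64; i += 8)
		if ((x >> i) & 0xff)
			r |= (uint64_t)0xff << i;
	return r;
}

/* A word contains a NUL byte iff orc.b of it is not all-ones. */
static int word_has_nul(uint64_t w)
{
	return orc_b(w) != (uint64_t)-1;
}
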
@@ -57,7 +59,7 @@ strcmp_zbb:
* a1 - string2
*
* Clobbers
- * t0, t1, t2, t3, t4, t5
+ * t0, t1, t2, t3, t4
*/
or t2, a0, a1
diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S
--- a/arch/riscv/lib/strlen.S
+++ b/arch/riscv/lib/strlen.S
@@ -96,7 +96,7 @@ strlen_zbb:
* of valid bytes in this chunk.
*/
srli a0, t1, 3
- bgtu t3, a0, 3f
+ bgtu t3, a0, 2f
/* Prepare for the word comparison loop. */
addi t2, t0, SZREG
@@ -112,20 +112,20 @@ strlen_zbb:
addi t0, t0, SZREG
orc.b t1, t1
beq t1, t3, 1b
-2:
+
not t1, t1
CZ t1, t1
+ srli t1, t1, 3
- /* Get number of processed words. */
+ /* Get number of processed bytes. */
sub t2, t0, t2
/* Add number of characters in the first word. */
add a0, a0, t2
- srli t1, t1, 3
/* Add number of characters in the last word. */
add a0, a0, t1
-3:
+2:
ret
.option pop
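
The reordering above groups the srli with the CZ result it
post-processes, and the fixed comment reflects that t0 - t2 is an
address difference, i.e. a byte count, not a word count. A rough C
restatement of the tail (a sketch only, assuming little-endian rv64,
reusing the orc_b() model above; names are mine, not the kernel's):

/* Called only once the loop has found a word containing a NUL. */
size_t strlen_zbb_tail(uint64_t last, const char *t0, const char *t2,
		       size_t a0)
{
	/* not + CZ: trailing zero bits locate the first NUL byte. */
	uint64_t t1 = __builtin_ctzll(~orc_b(last));

	t1 >>= 3;			/* srli: bits -> bytes */
	a0 += (size_t)(t0 - t2);	/* number of processed bytes */
	return a0 + t1;			/* characters in the last word */
}
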
diff --git a/arch/riscv/lib/strncmp.S b/arch/riscv/lib/strncmp.S
--- a/arch/riscv/lib/strncmp.S
+++ b/arch/riscv/lib/strncmp.S
@@ -70,7 +70,7 @@ strncmp_zbb:
li t5, -1
and t2, t2, SZREG-1
add t4, a0, a2
- bnez t2, 4f
+ bnez t2, 3f
/* Adjust limit for fast-path. */
andi t6, t4, -SZREG
@@ -114,23 +114,21 @@ strncmp_zbb:
ret
/* Simple loop for misaligned strings. */
-3:
- /* Restore limit for slow-path. */
.p2align 3
-4:
- bge a0, t4, 6f
+3:
+ bge a0, t4, 5f
lbu t0, 0(a0)
lbu t1, 0(a1)
addi a0, a0, 1
addi a1, a1, 1
- bne t0, t1, 5f
- bnez t0, 4b
+ bne t0, t1, 4f
+ bnez t0, 3b
-5:
+4:
sub a0, t0, t1
ret
-6:
+5:
li a0, 0
ret
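
The relabeling keeps the slow path's structure intact. Restated as C
(a sketch using the assembly's register names, not kernel code),
labels 3, 4 and 5 correspond to the loop test, the difference return
and the limit-reached return:

int strncmp_slow(const unsigned char *a0, const unsigned char *a1,
		 const unsigned char *t4 /* a0 + limit */)
{
	while (a0 < t4) {		/* 3: bge a0, t4, 5f */
		unsigned char t0 = *a0++;
		unsigned char t1 = *a1++;

		/* A NUL with t0 == t1 also lands here and yields 0. */
		if (t0 != t1 || t0 == '\0')
			return t0 - t1;	/* 4: sub a0, t0, t1 */
	}
	return 0;			/* 5: li a0, 0 */
}
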