From: Peter Zijlstra on
On Thu, 2010-02-04 at 16:54 +0100, Borislav Petkov wrote:
> On Thu, Feb 04, 2010 at 04:13:52PM +0100, Peter Zijlstra wrote:
> > hweight_long() isn't an arch primitive, only __arch_hweight{8,16,32,64}
> > are.
>
> Yeah, I'm still looking for the proper location. hweight_long() is the
> generic version so do we want to do the
>
> #ifndef __HAVE_ARCH_POPCNT
> static inline unsigned long hweight_long(unsigned long w)
> ....
>
> #endif
>
> thing and define a x86-specific version?

No, just don't touch hweight_long(), simply provide
__arch_hweight{8,16,32,64} and all will be well.

hweight_long() is provided by <include/linux.h> and is constructed from
hweight32() and hweight64() depending on the actual word size.

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
From: Peter Zijlstra on
On Fri, 2010-02-05 at 13:11 +0100, Borislav Petkov wrote:
> On Thu, Feb 04, 2010 at 05:04:17PM +0100, Peter Zijlstra wrote:
> > No, just don't touch hweight_long(), simply provide
> > __arch_hweight{8,16,32,64} and all will be well.
>
> Ok, another day, another version :)
>
> It is, of course, completely untested but it builds and the asm looks
> ok. I think I've addressed all concerns so far.

please work against a tree that has:

http://lkml.org/lkml/2010/2/4/119

in.

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
From: Borislav Petkov on
On Thu, Feb 04, 2010 at 05:04:17PM +0100, Peter Zijlstra wrote:
> No, just don't touch hweight_long(), simply provide
> __arch_hweight{8,16,32,64} and all will be well.

Ok, another day, another version :)

It is, of course, completely untested but it builds and the asm looks
ok. I think I've addressed all concerns so far.

--
arch/x86/include/asm/hweight.h | 14 ++++++++
arch/x86/lib/Makefile | 2 +-
arch/x86/lib/hweight.c | 57 ++++++++++++++++++++++++++++++++++
include/asm-generic/bitops/hweight.h | 45 ++++++++++++++++++++++++--
lib/hweight.c | 16 +++++-----
5 files changed, 121 insertions(+), 13 deletions(-)
create mode 100644 arch/x86/include/asm/hweight.h
create mode 100644 arch/x86/lib/hweight.c

diff --git a/arch/x86/include/asm/hweight.h b/arch/x86/include/asm/hweight.h
new file mode 100644
index 0000000..762125f
--- /dev/null
+++ b/arch/x86/include/asm/hweight.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_HWEIGHT_H
+#define _ASM_X86_HWEIGHT_H
+
+#define __arch_hweight8 __arch_hweight8
+#define __arch_hweight16 __arch_hweight16
+#define __arch_hweight32 __arch_hweight32
+#define __arch_hweight64 __arch_hweight64
+
+extern unsigned int __arch_hweight8(unsigned int);
+extern unsigned int __arch_hweight16(unsigned int);
+extern unsigned int __arch_hweight32(unsigned int);
+extern unsigned long __arch_hweight64(__u64);
+
+#endif /*_ASM_X86_HWEIGHT_H */
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index cffd754..e811bbd 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -22,7 +22,7 @@ lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
lib-$(CONFIG_KPROBES) += insn.o inat.o

-obj-y += msr.o msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o

ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
diff --git a/arch/x86/lib/hweight.c b/arch/x86/lib/hweight.c
new file mode 100644
index 0000000..3cf51c8
--- /dev/null
+++ b/arch/x86/lib/hweight.c
@@ -0,0 +1,57 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#define POPCNT32 ".byte 0xf3\n\t.byte 0x0f\n\t.byte 0xb8\n\t.byte 0xff"
+#define POPCNT64 ".byte 0xf3\n\t.byte 0x48\n\t.byte 0x0f\n\t.byte 0xb8\n\t.byte 0xff"
+
+#define __arch_hweight_alt(size) \
+ ALTERNATIVE("call _hweight"#size, POPCNT##size, X86_FEATURE_POPCNT)
+
+unsigned int __arch_hweight16(unsigned int w)
+{
+ unsigned int res = 0;
+
+ asm volatile("xor %%dh, %%dh\n\t"
+ __arch_hweight_alt(32)
+ : "=di" (res)
+ : "di" (w)
+ : "ecx", "memory");
+
+ return res;
+}
+EXPORT_SYMBOL(__arch_hweight16);
+
+unsigned int __arch_hweight8(unsigned int w)
+{
+ return __arch_hweight16(w & 0xff);
+}
+EXPORT_SYMBOL(__arch_hweight8);
+
+unsigned int __arch_hweight32(unsigned int w)
+{
+ unsigned int res = 0;
+
+ asm volatile(__arch_hweight_alt(32)
+ : "=di" (res)
+ : "di" (w)
+ : "ecx", "memory");
+
+ return res;
+
+}
+EXPORT_SYMBOL(__arch_hweight32);
+
+unsigned long __arch_hweight64(__u64 w)
+{
+ unsigned int res = 0;
+
+ asm volatile(__arch_hweight_alt(64)
+ : "=di" (res)
+ : "di" (w)
+ : "rsi", "rcx", "r8", "r9", "r10", "r11",
+ "memory");
+
+ return res;
+}
+EXPORT_SYMBOL(__arch_hweight64);
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
index fbbc383..340ad4e 100644
--- a/include/asm-generic/bitops/hweight.h
+++ b/include/asm-generic/bitops/hweight.h
@@ -2,10 +2,47 @@
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_

#include <asm/types.h>
+#include <asm/hweight.h>

-extern unsigned int hweight32(unsigned int w);
-extern unsigned int hweight16(unsigned int w);
-extern unsigned int hweight8(unsigned int w);
-extern unsigned long hweight64(__u64 w);
+extern unsigned int _hweight32(unsigned int w);
+extern unsigned int _hweight16(unsigned int w);
+extern unsigned int _hweight8(unsigned int w);
+extern unsigned long _hweight64(__u64 w);
+
+static inline unsigned int hweight8(unsigned int w)
+{
+#ifdef __arch_hweight8
+ return __arch_hweight8(w);
+#else
+ return _hweight8(w);
+#endif
+}
+
+static inline unsigned int hweight16(unsigned int w)
+{
+#ifdef __arch_hweight16
+ return __arch_hweight16(w);
+#else
+ return _hweight16(w);
+#endif
+}
+
+static inline unsigned int hweight32(unsigned int w)
+{
+#ifdef __arch_hweight32
+ return __arch_hweight32(w);
+#else
+ return _hweight32(w);
+#endif
+}
+
+static inline unsigned long hweight64(__u64 w)
+{
+#ifdef __arch_hweight64
+ return __arch_hweight64(w);
+#else
+ return _hweight64(w);
+#endif
+}

#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/lib/hweight.c b/lib/hweight.c
index 389424e..f7b81a1 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,7 +9,7 @@
* The Hamming Weight of a number is the total number of bits set in it.
*/

-unsigned int hweight32(unsigned int w)
+unsigned int _hweight32(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
@@ -17,26 +17,26 @@ unsigned int hweight32(unsigned int w)
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
}
-EXPORT_SYMBOL(hweight32);
+EXPORT_SYMBOL(_hweight32);

-unsigned int hweight16(unsigned int w)
+unsigned int _hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
-EXPORT_SYMBOL(hweight16);
+EXPORT_SYMBOL(_hweight16);

-unsigned int hweight8(unsigned int w)
+unsigned int _hweight8(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55);
res = (res & 0x33) + ((res >> 2) & 0x33);
return (res + (res >> 4)) & 0x0F;
}
-EXPORT_SYMBOL(hweight8);
+EXPORT_SYMBOL(_hweight8);

-unsigned long hweight64(__u64 w)
+unsigned long _hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
@@ -56,4 +56,4 @@ unsigned long hweight64(__u64 w)
#endif
#endif
}
-EXPORT_SYMBOL(hweight64);
+EXPORT_SYMBOL(_hweight64);
--
1.6.4.2


--
Regards/Gruss,
Boris.

-
Advanced Micro Devices, Inc.
Operating Systems Research Center
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
From: Borislav Petkov on
On Fri, Feb 05, 2010 at 01:54:42PM -0800, H. Peter Anvin wrote:
> On 02/05/2010 04:11 AM, Borislav Petkov wrote:
> > +
> > +unsigned int __arch_hweight16(unsigned int w)
> > +{
> > + unsigned int res = 0;
> > +
> > + asm volatile("xor %%dh, %%dh\n\t"
> > + __arch_hweight_alt(32)
> > + : "=di" (res)
> > + : "di" (w)
> > + : "ecx", "memory");
> > +
>
> This is wrong in more ways than I can shake a stick at.

Thanks for reviewing it though - how else would I learn :).

> a) "di" doesn't mean the DI register - it means the DX register (d) or
> an immediate (i). Since you don't have any reference to either %0 or %1
> in your code, you have no way of knowing which one it is. The
> constraint for the di register is "D".

right.

> b) On 32 bits, the first argument register is in %eax (with %edx used
> for the upper half of a 32-bit argument), but on 64 bits, the first
> argument is in %rdi, with the return still in %rax.

Sure, it is right there in arch/x86/include/asm/calling.h. Shame on me.

> c) You call a C function, but you don't clobber the set of registers
> that a C function would clobber. You either need to put the function in
> an assembly wrapper (which is better in the long run), or clobber the
> full set of registers that is clobbered by a C function (which is better
> in the short term) -- which is eax, edx, ecx on 32 bits, but rax, rdi,
> esi, rdx, rcx, r8, r9, r10, r11 on 64 bits.

I think you mean rsi instead of esi here.

Well, the example Brian pointed me to - __mutex_fastpath_lock - lists
the full set of clobbered registers. Please elaborate on the assembly
wrapper for the function, wouldn't I need to list all the clobbered
registers there too or am I missing something?

> d) On the other hand, you do *not* need a "memory" clobber.

Right, in this case we have all non-barrier like inlines so no memory
clobber, according to the comment above alternative() macro.

Thanks.

--
Regards/Gruss,
Boris.

-
Advanced Micro Devices, Inc.
Operating Systems Research Center
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
From: Borislav Petkov on
On Sat, Feb 06, 2010 at 05:55:47PM -0800, H. Peter Anvin wrote:
> > Well, the example Brian pointed me to - __mutex_fastpath_lock - lists
> > the full set of clobbered registers. Please elaborate on the assembly
> > wrapper for the function, wouldn't I need to list all the clobbered
> > registers there too or am I missing something?
> >
>
> The notion there would be that you do push/pop in the assembly wrapper.

Oh yes, something similar to SAVE/RESTORE_ALL in
<arch/x86/kernel/entry_32.S> could work. Good idea!

> >> d) On the other hand, you do *not* need a "memory" clobber.
> >
> > Right, in this case we have all non-barrier like inlines so no memory
> > clobber, according to the comment above alternative() macro.
>
> OK, I'm missing something here.
>
> A few more notions:
>
> a. This is exactly the kind of code where you don't want to put
> "volatile" on your asm statement, because it's a pure compute.
>
> b. It is really rather pointless to go through the whole alternatives
> work if you are then going to put it inside a function which isn't an
> inline ...

Well, in the second version I did replace a 'call _hweightXX' with
the actual popcnt opcode so the alternatives is only needed to do the
replacement during boot. We might just as well do

if (X86_FEATURE_POPCNT)
__hw_popcnt()
else
__software_hweight()

The only advantage of the alternatives is that it would save us the
if-else test above each time we do cpumask_weight. However, the if-else
approach is much more readable and obviates the need for all that macro
magic and taking special care of calling c function from within asm. And
since we do not call cpumask_weight all that often I'll honestly opt for
alternative-less solution...

Hmm...

Thanks,
Boris.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/