From: Sam Ravnborg
>
> Then, in order to also garbage-collect the sections, I added
>
> LDFLAGS_vmlinux += --gc-sections
>
> in top-level Makefile.
>
> This requires the additional patch (linux-2.6.35-rc4-fsgs.patch)
> which adds KEEP(section) directives to the kernel linker scripts.
> Otherwise, the linker will discard some crucial sections.
>

Changelog does not address why you need:

-Map $@.ldmap

and what effect it has.

And it is obvious that some archs should consolidate a little more from
asm-generic/vmlinux.lds.h.
But that said, this patch looks much better than the initial versions posted.

How do you determine which sections need the KEEP()?
Worth documenting for the future, when we add new sections.

Sam


--- linux-2.6.35-rc4.fs/Makefile
+++ linux-2.6.35-rc4-fsgs.obj/Makefile
@@ -610,6 +610,8 @@
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif

+LDFLAGS_vmlinux += --gc-sections
+
# Default kernel image to build when no specific target is given.
# KBUILD_IMAGE may be overruled on the command line or
# set in the environment
@@ -705,7 +707,7 @@
# Rule to link vmlinux - also used during CONFIG_KALLSYMS
# May be overridden by arch/$(ARCH)/Makefile
quiet_cmd_vmlinux__ ?= LD $@
- cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ \
+ cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ -Map $@.ldmap \
-T $(vmlinux-lds) $(vmlinux-init) \
--start-group $(vmlinux-main) --end-group \
$(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^)
--- linux-2.6.35-rc4.fs/arch/arm/kernel/vmlinux.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/arm/kernel/vmlinux.lds.S
@@ -157,7 +157,7 @@
. = ALIGN(32);
__start___ex_table = .;
#ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
#endif
__stop___ex_table = .;

--- linux-2.6.35-rc4.fs/arch/blackfin/kernel/vmlinux.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/blackfin/kernel/vmlinux.lds.S
@@ -51,7 +51,7 @@

. = ALIGN(16);
___start___ex_table = .;
- *(__ex_table)
+ KEEP(*(__ex_table))
___stop___ex_table = .;

__etext = .;
--- linux-2.6.35-rc4.fs/arch/cris/boot/rescue/rescue_v32.lds
+++ linux-2.6.35-rc4-fsgs.obj/arch/cris/boot/rescue/rescue_v32.lds
@@ -36,7 +36,7 @@
/* Get rid of stuff from EXPORT_SYMBOL(foo). */
/DISCARD/ :
{
- *(__ksymtab_strings)
- *(__ksymtab)
+ KEEP(*(__ksymtab_strings))
+ *(__ksymtab)
}
}
--- linux-2.6.35-rc4.fs/arch/cris/kernel/vmlinux.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/cris/kernel/vmlinux.lds.S
@@ -72,7 +72,7 @@
.init.setup : { INIT_SETUP(16) }
#ifdef CONFIG_ETRAX_ARCH_V32
__start___param = .;
- __param : { *(__param) }
+ __param : { KEEP(*(__param)) }
__stop___param = .;
#endif
.initcall.init : {
@@ -88,7 +88,7 @@
#ifdef CONFIG_BLK_DEV_INITRD
.init.ramfs : {
__initramfs_start = .;
- *(.init.ramfs)
+ KEEP(*(.init.ramfs))
__initramfs_end = .;
}
#endif
--- linux-2.6.35-rc4.fs/arch/h8300/kernel/vmlinux.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/h8300/kernel/vmlinux.lds.S
@@ -112,7 +112,7 @@
. = ALIGN(0x4) ;
INIT_SETUP(0x4)
___setup_start = .;
- *(.init.setup)
+ KEEP(*(.init.setup))
. = ALIGN(0x4) ;
___setup_end = .;
INIT_CALLS
--- linux-2.6.35-rc4.fs/arch/m68knommu/kernel/vmlinux.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/m68knommu/kernel/vmlinux.lds.S
@@ -72,7 +72,7 @@

. = ALIGN(16); /* Exception table */
__start___ex_table = .;
- *(__ex_table)
+ KEEP(*(__ex_table))
__stop___ex_table = .;

*(.rodata .rodata.*)
@@ -129,16 +129,16 @@

/* Kernel symbol table: GPL-future symbols */
__start___kcrctab_gpl_future = .;
- *(__kcrctab_gpl_future)
+ KEEP(*(__kcrctab_gpl_future))
__stop___kcrctab_gpl_future = .;

/* Kernel symbol table: strings */
- *(__ksymtab_strings)
+ KEEP(*(__ksymtab_strings))

/* Built-in module parameters */
. = ALIGN(4) ;
__start___param = .;
- *(__param)
+ KEEP(*(__param))
__stop___param = .;

. = ALIGN(4) ;
--- linux-2.6.35-rc4.fs/arch/microblaze/kernel/vmlinux.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/microblaze/kernel/vmlinux.lds.S
@@ -122,7 +122,7 @@

.init.ramfs ALIGN(4096) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
__initramfs_start = .;
- *(.init.ramfs)
+ KEEP(*(.init.ramfs))
__initramfs_end = .;
. = ALIGN(4);
LONG(0);
--- linux-2.6.35-rc4.fs/arch/mn10300/kernel/vmlinux.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/mn10300/kernel/vmlinux.lds.S
@@ -51,7 +51,7 @@
. = ALIGN(PAGE_SIZE);
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
- *(.smp_locks)
+ KEEP(*(.smp_locks))
__smp_locks_end = .;
}

@@ -62,9 +62,9 @@
INIT_DATA_SECTION(16)
. = ALIGN(4);
__alt_instructions = .;
- .altinstructions : { *(.altinstructions) }
+ .altinstructions : { KEEP(*(.altinstructions)) }
__alt_instructions_end = .;
- .altinstr_replacement : { *(.altinstr_replacement) }
+ .altinstr_replacement : { KEEP(*(.altinstr_replacement)) }
/* .exit.text is discard at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
.exit.text : { EXIT_TEXT; }
--- linux-2.6.35-rc4.fs/arch/um/include/asm/common.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/um/include/asm/common.lds.S
@@ -76,10 +76,10 @@
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
- *(.altinstructions)
+ KEEP(*(.altinstructions))
__alt_instructions_end = .;
}
- .altinstr_replacement : { *(.altinstr_replacement) }
+ .altinstr_replacement : { KEEP(*(.altinstr_replacement)) }
/* .exit.text is discard at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
.exit.text : { *(.exit.text) }
--- linux-2.6.35-rc4.fs/arch/x86/kernel/vmlinux.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/x86/kernel/vmlinux.lds.S
@@ -162,46 +162,46 @@

. = VSYSCALL_ADDR;
.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
- *(.vsyscall_0)
+ KEEP(*(.vsyscall_0))
} :user

. = ALIGN(L1_CACHE_BYTES);
.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
- *(.vsyscall_fn)
+ KEEP(*(.vsyscall_fn))
}

. = ALIGN(L1_CACHE_BYTES);
.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
- *(.vsyscall_gtod_data)
+ KEEP(*(.vsyscall_gtod_data))
}

vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
- *(.vsyscall_clock)
+ KEEP(*(.vsyscall_clock))
}
vsyscall_clock = VVIRT(.vsyscall_clock);


.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
- *(.vsyscall_1)
+ KEEP(*(.vsyscall_1))
}
.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
- *(.vsyscall_2)
+ KEEP(*(.vsyscall_2))
}

.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
- *(.vgetcpu_mode)
+ KEEP(*(.vgetcpu_mode))
}
vgetcpu_mode = VVIRT(.vgetcpu_mode);

. = ALIGN(L1_CACHE_BYTES);
.jiffies : AT(VLOAD(.jiffies)) {
- *(.jiffies)
+ KEEP(*(.jiffies))
}
jiffies = VVIRT(.jiffies);

.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
- *(.vsyscall_3)
+ KEEP(*(.vsyscall_3))
}

. = __vsyscall_0 + PAGE_SIZE;
@@ -252,12 +252,12 @@
. = ALIGN(8);
.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
__alt_instructions = .;
- *(.altinstructions)
+ KEEP(*(.altinstructions))
__alt_instructions_end = .;
}

.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
- *(.altinstr_replacement)
+ KEEP(*(.altinstr_replacement))
}

/*
@@ -290,7 +290,7 @@
. = ALIGN(PAGE_SIZE);
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
- *(.smp_locks)
+ KEEP(*(.smp_locks))
. = ALIGN(PAGE_SIZE);
__smp_locks_end = .;
}
--- linux-2.6.35-rc4.fs/arch/x86/vdso/vdso-layout.lds.S
+++ linux-2.6.35-rc4-fsgs.obj/arch/x86/vdso/vdso-layout.lds.S
@@ -34,8 +34,8 @@
*(.gnu.linkonce.b.*)
}

- .altinstructions : { *(.altinstructions) }
- .altinstr_replacement : { *(.altinstr_replacement) }
+ .altinstructions : { KEEP(*(.altinstructions)) }
+ .altinstr_replacement : { KEEP(*(.altinstr_replacement)) }

/*
* Align the actual code well away from the non-instruction data.
--- linux-2.6.35-rc4.fs/include/asm-generic/vmlinux.lds.h
+++ linux-2.6.35-rc4-fsgs.obj/include/asm-generic/vmlinux.lds.h
@@ -229,25 +229,25 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
} \
\
@@ -270,76 +270,76 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(__ksymtab) \
+ KEEP(*(__ksymtab)) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(__ksymtab_gpl) \
+ KEEP(*(__ksymtab_gpl)) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(__ksymtab_unused) \
+ KEEP(*(__ksymtab_unused)) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(__ksymtab_unused_gpl) \
+ KEEP(*(__ksymtab_unused_gpl)) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(__ksymtab_gpl_future) \
+ KEEP(*(__ksymtab_gpl_future)) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(__kcrctab) \
+ KEEP(*(__kcrctab)) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(__kcrctab_gpl) \
+ KEEP(*(__kcrctab_gpl)) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
- *(__kcrctab_unused) \
+ KEEP(*(__kcrctab_unused)) \
VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(__kcrctab_unused_gpl) \
+ KEEP(*(__kcrctab_unused_gpl)) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(__kcrctab_gpl_future) \
+ KEEP(*(__kcrctab_gpl_future)) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- *(__ksymtab_strings) \
+ KEEP(*(__ksymtab_strings)) \
} \
\
/* __*init sections */ \
@@ -356,7 +356,7 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -371,7 +371,7 @@
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .; \
}

@@ -424,7 +424,7 @@
#endif

/* Section used for early init (in .S files) */
-#define HEAD_TEXT *(.head.text)
+#define HEAD_TEXT KEEP(*(.head.text))

#define HEAD_TEXT_SECTION \
.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
@@ -438,7 +438,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}

@@ -591,29 +591,29 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;

#define INITCALLS \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
VMLINUX_SYMBOL(__early_initcall_end) = .; \
- *(.initcall0.init) \
- *(.initcall0s.init) \
- *(.initcall1.init) \
- *(.initcall1s.init) \
- *(.initcall2.init) \
- *(.initcall2s.init) \
- *(.initcall3.init) \
- *(.initcall3s.init) \
- *(.initcall4.init) \
- *(.initcall4s.init) \
- *(.initcall5.init) \
- *(.initcall5s.init) \
- *(.initcallrootfs.init) \
- *(.initcall6.init) \
- *(.initcall6s.init) \
- *(.initcall7.init) \
- *(.initcall7s.init)
+ KEEP(*(.initcall0.init)) \
+ KEEP(*(.initcall0s.init)) \
+ KEEP(*(.initcall1.init)) \
+ KEEP(*(.initcall1s.init)) \
+ KEEP(*(.initcall2.init)) \
+ KEEP(*(.initcall2s.init)) \
+ KEEP(*(.initcall3.init)) \
+ KEEP(*(.initcall3s.init)) \
+ KEEP(*(.initcall4.init)) \
+ KEEP(*(.initcall4s.init)) \
+ KEEP(*(.initcall5.init)) \
+ KEEP(*(.initcall5s.init)) \
+ KEEP(*(.initcallrootfs.init)) \
+ KEEP(*(.initcall6.init)) \
+ KEEP(*(.initcall6s.init)) \
+ KEEP(*(.initcall7.init)) \
+ KEEP(*(.initcall7s.init))

#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
@@ -622,19 +622,19 @@

#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
VMLINUX_SYMBOL(__initramfs_end) = .;
#else
#define INIT_RAM_FS
From: Denys Vlasenko
On Friday 23 July 2010 21:10, Sam Ravnborg wrote:
> >
> > * modpost fix for 64k+ sections: linux-2.6.35-rc4-fs.modpost.patch
> > This patch is in -mm; it still has not reached mainline...
> >
>
> Some comments below - but nothing fundamental.
>
> + /* Fixup for more than 64k sections */
> + info->num_sections = hdr->e_shnum;
> + if (info->num_sections == 0) { /* more than 64k sections? */
> + /* note: it doesn't need shndx2secindex() */
> + info->num_sections = TO_NATIVE(sechdrs[0].sh_size);
> + }
> I had to read the above twice to get it.
> How about something like this:
>
> /* Fixup for more than 64k sections */
> if (hdr->e_shnum == 0) {
> /*
> * There are more than 64k sections,
> * read count from .sh_size.
> * note: it doesn't need shndx2secindex()
> */
> info->num_sections = TO_NATIVE(sechdrs[0].sh_size);
> }
> else {
> info->num_sections = hdr->e_shnum;
> }
>
>
> + info->secindex_strings = hdr->e_shstrndx;
> + if (info->secindex_strings == SHN_XINDEX)
> + info->secindex_strings =
> + shndx2secindex(TO_NATIVE(sechdrs[0].sh_link));
>
> Likewise here...

Done both.

....
> - for (i = 1; i < hdr->e_shnum; i++) {
> - const char *secstrings
> - = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
> + secstrings = (void *)hdr + sechdrs[info->secindex_strings].sh_offset;
>
> Moving this assignment out of the loop is an unrelated
> but welcome change.

I take it you are OK with it?

Please find updated patch below.

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
--
vda



--- linux-2.6.35-rc4/scripts/mod/file2alias.c
+++ linux-2.6.35-rc4-fs.modpost/scripts/mod/file2alias.c
@@ -884,16 +884,16 @@
char *zeros = NULL;

/* We're looking for a section relative symbol */
- if (!sym->st_shndx || sym->st_shndx >= info->hdr->e_shnum)
+ if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections)
return;

/* Handle all-NULL symbols allocated into .bss */
- if (info->sechdrs[sym->st_shndx].sh_type & SHT_NOBITS) {
+ if (info->sechdrs[get_secindex(info, sym)].sh_type & SHT_NOBITS) {
zeros = calloc(1, sym->st_size);
symval = zeros;
} else {
symval = (void *)info->hdr
- + info->sechdrs[sym->st_shndx].sh_offset
+ + info->sechdrs[get_secindex(info, sym)].sh_offset
+ sym->st_value;
}

--- linux-2.6.35-rc4/scripts/mod/modpost.c
+++ linux-2.6.35-rc4-fs.modpost/scripts/mod/modpost.c
@@ -253,7 +253,7 @@
return export_unknown;
}

-static enum export export_from_sec(struct elf_info *elf, Elf_Section sec)
+static enum export export_from_sec(struct elf_info *elf, unsigned int sec)
{
if (sec == elf->export_sec)
return export_plain;
@@ -373,6 +373,8 @@
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
Elf_Sym *sym;
+ const char *secstrings;
+ unsigned int symtab_idx = ~0U, symtab_shndx_idx = ~0U;

hdr = grab_file(filename, &info->size);
if (!hdr) {
@@ -417,8 +419,27 @@
return 0;
}

+ if (hdr->e_shnum == 0) {
+ /*
+ * There are more than 64k sections,
+ * read count from .sh_size.
+ * note: it doesn't need shndx2secindex()
+ */
+ info->num_sections = TO_NATIVE(sechdrs[0].sh_size);
+ }
+ else {
+ info->num_sections = hdr->e_shnum;
+ }
+ if (hdr->e_shstrndx == SHN_XINDEX) {
+ info->secindex_strings =
+ shndx2secindex(TO_NATIVE(sechdrs[0].sh_link));
+ }
+ else {
+ info->secindex_strings = hdr->e_shstrndx;
+ }
+
/* Fix endianness in section headers */
- for (i = 0; i < hdr->e_shnum; i++) {
+ for (i = 0; i < info->num_sections; i++) {
sechdrs[i].sh_name = TO_NATIVE(sechdrs[i].sh_name);
sechdrs[i].sh_type = TO_NATIVE(sechdrs[i].sh_type);
sechdrs[i].sh_flags = TO_NATIVE(sechdrs[i].sh_flags);
@@ -431,9 +452,8 @@
sechdrs[i].sh_entsize = TO_NATIVE(sechdrs[i].sh_entsize);
}
/* Find symbol table. */
- for (i = 1; i < hdr->e_shnum; i++) {
- const char *secstrings
- = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ secstrings = (void *)hdr + sechdrs[info->secindex_strings].sh_offset;
+ for (i = 1; i < info->num_sections; i++) {
const char *secname;
int nobits = sechdrs[i].sh_type == SHT_NOBITS;

@@ -461,14 +481,26 @@
else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
info->export_gpl_future_sec = i;

- if (sechdrs[i].sh_type != SHT_SYMTAB)
- continue;
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ unsigned int sh_link_idx;
+ symtab_idx = i;
+ info->symtab_start = (void *)hdr +
+ sechdrs[i].sh_offset;
+ info->symtab_stop = (void *)hdr +
+ sechdrs[i].sh_offset + sechdrs[i].sh_size;
+ sh_link_idx = shndx2secindex(sechdrs[i].sh_link);
+ info->strtab = (void *)hdr +
+ sechdrs[sh_link_idx].sh_offset;
+ }

- info->symtab_start = (void *)hdr + sechdrs[i].sh_offset;
- info->symtab_stop = (void *)hdr + sechdrs[i].sh_offset
- + sechdrs[i].sh_size;
- info->strtab = (void *)hdr +
- sechdrs[sechdrs[i].sh_link].sh_offset;
+ /* 32bit section no. table? ("more than 64k sections") */
+ if (sechdrs[i].sh_type == SHT_SYMTAB_SHNDX) {
+ symtab_shndx_idx = i;
+ info->symtab_shndx_start = (void *)hdr +
+ sechdrs[i].sh_offset;
+ info->symtab_shndx_stop = (void *)hdr +
+ sechdrs[i].sh_offset + sechdrs[i].sh_size;
+ }
}
if (!info->symtab_start)
fatal("%s has no symtab?\n", filename);
@@ -480,6 +512,21 @@
sym->st_value = TO_NATIVE(sym->st_value);
sym->st_size = TO_NATIVE(sym->st_size);
}
+
+ if (symtab_shndx_idx != ~0U) {
+ Elf32_Word *p;
+ if (symtab_idx !=
+ shndx2secindex(sechdrs[symtab_shndx_idx].sh_link))
+ fatal("%s: SYMTAB_SHNDX has bad sh_link: %u!=%u\n",
+ filename,
+ shndx2secindex(sechdrs[symtab_shndx_idx].sh_link),
+ symtab_idx);
+ /* Fix endianness */
+ for (p = info->symtab_shndx_start; p < info->symtab_shndx_stop;
+ p++)
+ *p = TO_NATIVE(*p);
+ }
+
return 1;
}

@@ -514,7 +561,7 @@
Elf_Sym *sym, const char *symname)
{
unsigned int crc;
- enum export export = export_from_sec(info, sym->st_shndx);
+ enum export export = export_from_sec(info, get_secindex(info, sym));

switch (sym->st_shndx) {
case SHN_COMMON:
@@ -656,19 +703,19 @@
return "(unknown)";
}

-static const char *sec_name(struct elf_info *elf, int shndx)
+static const char *sec_name(struct elf_info *elf, int secindex)
{
Elf_Shdr *sechdrs = elf->sechdrs;
return (void *)elf->hdr +
- elf->sechdrs[elf->hdr->e_shstrndx].sh_offset +
- sechdrs[shndx].sh_name;
+ elf->sechdrs[elf->secindex_strings].sh_offset +
+ sechdrs[secindex].sh_name;
}

static const char *sech_name(struct elf_info *elf, Elf_Shdr *sechdr)
{
return (void *)elf->hdr +
- elf->sechdrs[elf->hdr->e_shstrndx].sh_offset +
- sechdr->sh_name;
+ elf->sechdrs[elf->secindex_strings].sh_offset +
+ sechdr->sh_name;
}

/* if sym is empty or point to a string
@@ -1047,11 +1094,14 @@
Elf_Sym *near = NULL;
Elf64_Sword distance = 20;
Elf64_Sword d;
+ unsigned int relsym_secindex;

if (relsym->st_name != 0)
return relsym;
+
+ relsym_secindex = get_secindex(elf, relsym);
for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
- if (sym->st_shndx != relsym->st_shndx)
+ if (get_secindex(elf, sym) != relsym_secindex)
continue;
if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
@@ -1113,9 +1163,9 @@
for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
const char *symsec;

- if (sym->st_shndx >= SHN_LORESERVE)
+ if (is_shndx_special(sym->st_shndx))
continue;
- symsec = sec_name(elf, sym->st_shndx);
+ symsec = sec_name(elf, get_secindex(elf, sym));
if (strcmp(symsec, sec) != 0)
continue;
if (!is_valid_name(elf, sym))
@@ -1311,7 +1361,7 @@
const char *tosec;
const struct sectioncheck *mismatch;

- tosec = sec_name(elf, sym->st_shndx);
+ tosec = sec_name(elf, get_secindex(elf, sym));
mismatch = section_mismatch(fromsec, tosec);
if (mismatch) {
Elf_Sym *to;
@@ -1339,7 +1389,7 @@
Elf_Shdr *sechdr, Elf_Rela *r)
{
Elf_Shdr *sechdrs = elf->sechdrs;
- int section = sechdr->sh_info;
+ int section = shndx2secindex(sechdr->sh_info);

return (void *)elf->hdr + sechdrs[section].sh_offset +
r->r_offset - sechdrs[section].sh_addr;
@@ -1447,7 +1497,7 @@
r.r_addend = TO_NATIVE(rela->r_addend);
sym = elf->symtab_start + r_sym;
/* Skip special sections */
- if (sym->st_shndx >= SHN_LORESERVE)
+ if (is_shndx_special(sym->st_shndx))
continue;
check_section_mismatch(modname, elf, &r, sym, fromsec);
}
@@ -1505,7 +1555,7 @@
}
sym = elf->symtab_start + r_sym;
/* Skip special sections */
- if (sym->st_shndx >= SHN_LORESERVE)
+ if (is_shndx_special(sym->st_shndx))
continue;
check_section_mismatch(modname, elf, &r, sym, fromsec);
}
@@ -1530,7 +1580,7 @@
Elf_Shdr *sechdrs = elf->sechdrs;

/* Walk through all sections */
- for (i = 0; i < elf->hdr->e_shnum; i++) {
+ for (i = 0; i < elf->num_sections; i++) {
check_section(modname, elf, &elf->sechdrs[i]);
/* We want to process only relocation sections and not .init */
if (sechdrs[i].sh_type == SHT_RELA)
--- linux-2.6.35-rc4/scripts/mod/modpost.h
+++ linux-2.6.35-rc4-fs.modpost/scripts/mod/modpost.h
@@ -129,7 +129,50 @@
const char *strtab;
char *modinfo;
unsigned int modinfo_len;
+
+ /* support for 32bit section numbers */
+
+ unsigned int num_sections; /* max_secindex + 1 */
+ unsigned int secindex_strings;
+ /* if Nth symbol table entry has .st_shndx = SHN_XINDEX,
+ * take shndx from symtab_shndx_start[N] instead */
+ Elf32_Word *symtab_shndx_start;
+ Elf32_Word *symtab_shndx_stop;
};
+
+static inline int is_shndx_special(unsigned int i)
+{
+ return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
+}
+
+/* shndx is in [0..SHN_LORESERVE) U (SHN_HIRESERVE, 0xfffffff], thus:
+ * shndx == 0 <=> sechdrs[0]
+ * ......
+ * shndx == SHN_LORESERVE-1 <=> sechdrs[SHN_LORESERVE-1]
+ * shndx == SHN_HIRESERVE+1 <=> sechdrs[SHN_LORESERVE]
+ * shndx == SHN_HIRESERVE+2 <=> sechdrs[SHN_LORESERVE+1]
+ * ......
+ * fyi: sym->st_shndx is uint16, SHN_LORESERVE = ff00, SHN_HIRESERVE = ffff,
+ * so basically we map 0000..feff -> 0000..feff
+ * ff00..ffff -> (you are a bad boy, dont do it)
+ * 10000..xxxx -> ff00..(xxxx-0x100)
+ */
+static inline unsigned int shndx2secindex(unsigned int i)
+{
+ if (i <= SHN_HIRESERVE)
+ return i;
+ return i - (SHN_HIRESERVE + 1 - SHN_LORESERVE);
+}
+
+/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
+static inline unsigned int get_secindex(const struct elf_info *info,
+ const Elf_Sym *sym)
+{
+ if (sym->st_shndx != SHN_XINDEX)
+ return sym->st_shndx;
+ return shndx2secindex(info->symtab_shndx_start[sym -
+ info->symtab_start]);
+}

/* file2alias.c */
extern unsigned int cross_build;
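
For reference, this is how the new helpers are meant to compose when
walking the symbol table (an illustrative sketch only, not part of the
patch; walk_symbols() is a made-up driver):

static void walk_symbols(struct elf_info *info)
{
	Elf_Sym *sym;

	for (sym = info->symtab_start; sym < info->symtab_stop; sym++) {
		unsigned int secindex;

		/* SHN_ABS, SHN_COMMON etc. do not name a real section;
		 * SHN_XINDEX is deliberately not "special" here and
		 * falls through to the accessor below */
		if (is_shndx_special(sym->st_shndx))
			continue;
		/* reads the 32bit index from .symtab_shndx if needed */
		secindex = get_secindex(info, sym);
		/* secindex is now a plain array index, valid for
		 * info->sechdrs[0 .. info->num_sections - 1] */
	}
}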
From: Denys Vlasenko
On Friday 23 July 2010 22:24, Sam Ravnborg wrote:
> > * fix for kernel linker scripts: linux-2.6.35-rc4-fs.fix-kernel-linker-scripts.patch
> > It makes _all_ linker scripts -ffunction/data-sections safe via:
> > - *(.data)
> > + *(.data .data.*)
> >
>
> This patch touches both the regular kernel linker scripts and the
> boot linker scripts.
> I would strongly prefer a split, so that you touch the boot linker
> scripts in a separate patch.

No problem.

> In addition the patch handles more sections than
> documented: rodata, bss, text.

Yes, that was assumed.

> The patch introduces the following syntax for input sections:
>
> *(.bss .bss.*)
>
> whereas the kernel linker scripts would have one input
> section per line.
> Can we stick to the "layout" used in the kernel linker scripts?

Sure.
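
I.e. I'll keep the kernel's one-input-section-per-line style:

	*(.bss)
	*(.bss.*)

rather than folding both into a single *(.bss .bss.*) line.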


> In addition, the patch in several places changes linker
> scripts that already use the two-line layout to a single-line approach.
> These "fixes" should be omitted.

Reverted.

> Why do we need *(.bss .bss.*)?
> Does -fdata-sections introduce special .bss sections too?

Yes, it does.
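
For example (a quick sketch; section names follow gcc's
one-section-per-symbol convention):

/* foo.c */
static int counter;		/* -fdata-sections: .bss.counter  */
static int limit = 100;		/* -fdata-sections: .data.limit   */
int foo(void)			/* -ffunction-sections: .text.foo */
{
	return counter < limit;
}

$ gcc -c -ffunction-sections -fdata-sections foo.c
$ objdump -h foo.o | grep '\.\(text\|data\|bss\)\.'

So a bare *(.bss) in a linker script would miss .bss.counter; the
scripts must match .bss.* as well.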

Please take a look at attached updated patches.

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
--
vda

From: Denys Vlasenko
Hi Sam,

On Friday 23 July 2010 22:35, Sam Ravnborg wrote:
> >
> > Then, in order to also garbage-collect the sections, I added
> >
> > LDFLAGS_vmlinux += --gc-sections
> >
> > in top-level Makefile.
> >
> > This requires the additional patch (linux-2.6.35-rc4-fsgs.patch)
> > which adds KEEP(section) directives to the kernel linker scripts.
> > Otherwise, the linker will discard some crucial sections.
> >
>
> Changelog does not address why you need:
>
> -Map $@.ldmap
>
> and what effect it has.

I didn't plan to push the last step (--gc-sections) to mainline yet.
Thus the patch has debugging stuff in it.
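
-Map simply makes ld write a link map to the given file; with
--gc-sections the map's "Discarded input sections" listing shows
exactly what the linker dropped. Roughly (a sample invocation only;
the real command line comes from cmd_vmlinux__ above):

$ ld --gc-sections -Map vmlinux.ldmap -T vmlinux.lds -o vmlinux ...
$ sed -n '/Discarded input sections/,/Memory Configuration/p' vmlinux.ldmap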


> And it is obvious that some archs should consolidate a little more from
> asm-generic/vmlinux.lds.h.
> But that said, this patch looks much better than the initial versions posted.
>
> How do you determine which sections need the KEEP()?
> Worth documenting for the future, when we add new sections.

No problem; I will add a comment at every KEEP() explaining why it's needed.
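Something along these lines (a sketch only, exact wording to be decided):

	__ex_table : {
		__start___ex_table = .;
		/* KEEP: entries here are reached only through the
		 * __start/__stop symbols, so ld sees no direct
		 * reference to the section and would otherwise
		 * garbage-collect it */
		KEEP(*(__ex_table))
		__stop___ex_table = .;
	}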

--
vda
From: Sam Ravnborg
On Mon, Jul 26, 2010 at 02:52:57AM +0200, Denys Vlasenko wrote:
> On Friday 23 July 2010 21:10, Sam Ravnborg wrote:
> > >
> > > * modpost fix for 64k+ sections: linux-2.6.35-rc4-fs.modpost.patch
> > > This patch is in -mm; it still has not reached mainline...
> > >
> >
> > Some comments below - but nothing fundamental.
> >
> > + /* Fixup for more than 64k sections */
> > + info->num_sections = hdr->e_shnum;
> > + if (info->num_sections == 0) { /* more than 64k sections? */
> > + /* note: it doesn't need shndx2secindex() */
> > + info->num_sections = TO_NATIVE(sechdrs[0].sh_size);
> > + }
> > I had to read the above twice to get it.
> > How about something like this:
> >
> > /* Fixup for more than 64k sections */
> > if (hdr->e_shnum == 0) {
> > /*
> > * There are more than 64k sections,
> > * read count from .sh_size.
> > * note: it doesn't need shndx2secindex()
> > */
> > info->num_sections = TO_NATIVE(sechdrs[0].sh_size);
> > }
> > else {
> > info->num_sections = hdr->e_shnum;
> > }
> >
> >
> > + info->secindex_strings = hdr->e_shstrndx;
> > + if (info->secindex_strings == SHN_XINDEX)
> > + info->secindex_strings =
> > + shndx2secindex(TO_NATIVE(sechdrs[0].sh_link));
> >
> > Likewise here...
>
> Done both.
>
> ...
> > - for (i = 1; i < hdr->e_shnum; i++) {
> > - const char *secstrings
> > - = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
> > + secstrings = (void *)hdr + sechdrs[info->secindex_strings].sh_offset;
> >
> > Moving this assignment out of the loop is an unrelated
> > but welcome change.
>
> I take it you are OK with it?
>
> Please find updated patch below.
>
> Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>

If the patch gets a proper changelog, then it has my:

Acked-by: Sam Ravnborg <sam@ravnborg.org>

Sam