aboutsummaryrefslogtreecommitdiffstats
path: root/patches
diff options
context:
space:
mode:
Diffstat (limited to 'patches')
-rw-r--r--  patches/Makefile.am              |  5
-rw-r--r--  patches/libelf-0.7.0-hash.patch  | 20
-rw-r--r--  patches/libelf-0.7.0.patch       | 16
-rw-r--r--  patches/linux-2.4.10.patch       | 50
4 files changed, 91 insertions, 0 deletions
diff --git a/patches/Makefile.am b/patches/Makefile.am
new file mode 100644
index 0000000..efe5d40
--- /dev/null
+++ b/patches/Makefile.am
@@ -0,0 +1,5 @@
+## Process this file with automake to create Makefile.in
+
+AUTOMAKE_OPTIONS = 1.4 gnu
+
+EXTRA_DIST = libelf-0.7.0.patch libelf-0.7.0-hash.patch linux-2.4.10.patch
diff --git a/patches/libelf-0.7.0-hash.patch b/patches/libelf-0.7.0-hash.patch
new file mode 100644
index 0000000..7fcf7f3
--- /dev/null
+++ b/patches/libelf-0.7.0-hash.patch
@@ -0,0 +1,20 @@
+2002-06-14 Jakub Jelinek <jakub@redhat.com>
+
+ * update.c (_elf64_layout): Don't overwrite sh_entsize
+ unconditionally for ELF64 - some platforms use
+ 64 bit DT_HASH entries.
+
+--- libelf-0.7.0/lib/update.c.jj Fri Jun 12 15:42:39 1998
++++ libelf-0.7.0/lib/update.c Fri Jun 14 10:22:19 2002
+@@ -317,7 +317,10 @@ _elf64_layout(Elf *elf, unsigned *flag)
+
+ entsize = scn_entsize(elf, version, shdr->sh_type);
+ if (entsize > 1) {
+- rewrite(shdr->sh_entsize, entsize, scn->s_shdr_flags);
++ /* Some architectures use 64-bit hash entries. */
++ if (shdr->sh_type != SHT_HASH
++ || shdr->sh_entsize != _fsize(elf->e_class, version, ELF_T_ADDR))
++ rewrite(shdr->sh_entsize, entsize, scn->s_shdr_flags);
+ }
+
+ if (layout) {
diff --git a/patches/libelf-0.7.0.patch b/patches/libelf-0.7.0.patch
new file mode 100644
index 0000000..1e9e57a
--- /dev/null
+++ b/patches/libelf-0.7.0.patch
@@ -0,0 +1,16 @@
+2001-09-26 Jakub Jelinek <jakub@redhat.com>
+
+ * lib/64.xlatetof.c (__load_i64M): Don't blow away upper 32 bits
+	  if 31st bit is set.
+
+--- libelf-0.7.0/lib/64.xlatetof.c.jj Tue Aug 25 17:22:24 1998
++++ libelf-0.7.0/lib/64.xlatetof.c Wed Sep 26 15:00:18 2001
+@@ -42,7 +42,7 @@ __load_i64L(const unsigned char *from) {
+
+ static __libelf_i64_t
+ __load_i64M(const unsigned char *from) {
+- return ((__libelf_u64_t)__load_u32M(from) << 32) | (__libelf_i64_t)__load_i32M(from + 4);
++ return ((__libelf_i64_t)__load_i32M(from) << 32) | (__libelf_u64_t)__load_u32M(from + 4);
+ }
+
+ static void
diff --git a/patches/linux-2.4.10.patch b/patches/linux-2.4.10.patch
new file mode 100644
index 0000000..dbd6702
--- /dev/null
+++ b/patches/linux-2.4.10.patch
@@ -0,0 +1,50 @@
+--- linux/fs/binfmt_elf.c.jj Thu Sep 6 16:12:04 2001
++++ linux/fs/binfmt_elf.c Mon Oct 1 08:22:06 2001
+@@ -400,7 +400,6 @@ static int load_elf_binary(struct linux_
+ int load_addr_set = 0;
+ char * elf_interpreter = NULL;
+ unsigned int interpreter_type = INTERPRETER_NONE;
+- mm_segment_t old_fs;
+ unsigned long error;
+ struct elf_phdr * elf_ppnt, *elf_phdata;
+ unsigned long elf_bss, k, elf_brk;
+@@ -574,8 +573,6 @@ static int load_elf_binary(struct linux_
+ the image should be loaded at fixed address, not at a variable
+ address. */
+
+- old_fs = get_fs();
+- set_fs(get_ds());
+ for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
+ int elf_prot = 0, elf_flags;
+ unsigned long vaddr;
+@@ -583,6 +580,22 @@ static int load_elf_binary(struct linux_
+ if (elf_ppnt->p_type != PT_LOAD)
+ continue;
+
++ if (unlikely (elf_brk > elf_bss)) {
++ unsigned long nbyte;
++
++ /* There was a PT_LOAD segment with p_memsz > p_filesz
++ before this one. Map anonymous pages, if needed,
++ and clear the area. */
++ set_brk (elf_bss + load_bias, elf_brk + load_bias);
++ nbyte = ELF_PAGEOFFSET(elf_bss);
++ if (nbyte) {
++ nbyte = ELF_MIN_ALIGN - nbyte;
++ if (nbyte > elf_brk - elf_bss)
++ nbyte = elf_brk - elf_bss;
++			clear_user((void *) elf_bss + load_bias, nbyte);
++ }
++ }
++
+ if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
+ if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
+ if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
+@@ -626,7 +639,6 @@ static int load_elf_binary(struct linux_
+ if (k > elf_brk)
+ elf_brk = k;
+ }
+- set_fs(old_fs);
+
+ elf_entry += load_bias;
+ elf_bss += load_bias;