[sparc] Fix misaligned tracing information (Closes: #609371)

svn path=/dists/trunk/linux-2.6/; revision=16848
Ben Hutchings 2011-01-24 02:12:36 +00:00
parent db7f6530f1
commit 0b7edf388b
4 changed files with 423 additions and 0 deletions

debian/changelog

@@ -5,6 +5,8 @@ linux-2.6 (2.6.37-1~experimental.2) UNRELEASED; urgency=low
in another build failure
* r8169: Keep firmware in memory (Closes: #609538)
* r8712u: Firmware filename is rtlwifi/rtl8712u.bin (Closes: #602450)
* [sparc] Fix misaligned tracing information which the module loader
does not support (Closes: #609371)
[ Aurelien Jarno ]
* [sh4] Export cpu_core_map to fix build failure with CONFIG_SFC=m.

debian/patches/bugfix/sparc/introduce-u64-aligned.patch

@@ -0,0 +1,136 @@
From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Subject: introduce __u64_aligned and U64_ALIGN() for structure alignment in custom sections (v3)
Date: Fri, 21 Jan 2011 15:36:31 -0500
Problem description:
gcc happily aligns statically defined structures on 32-byte boundaries. Ftrace
trace events and Tracepoints both statically define structures in custom
sections (using the "section" attribute), then use the linker scripts to assign
symbols to these sections so they can be iterated as arrays.

However, gcc uses different alignments for these structures when they are
defined statically than when they are globally visible and/or in an array.
Iteration over these arrays therefore sees "holes" of padding. gcc is within
its rights to increase the alignment of the statically defined structures
because, normally, there should be no accesses to them other than from the
local object. We are, however, iterating over the generated structures as if
they were an array, without letting gcc know anything about it.
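To make the failure mode concrete, here is a minimal user-space sketch of the
pattern (hypothetical names, not kernel code; it relies on GNU ld
auto-generating __start_/__stop_ symbols for identifier-named sections):

#include <stdio.h>

struct custom {
        unsigned long field;
};

/* Two translation-unit-local definitions dropped into one section; gcc
 * is free to give each of them a larger alignment than the type's. */
static struct custom __attribute__((section("__custom"), used))
one = { .field = 1 };
static struct custom __attribute__((section("__custom"), used))
two = { .field = 2 };

/* GNU ld provides these automatically for identifier-named sections. */
extern struct custom __start___custom[];
extern struct custom __stop___custom[];

int main(void)
{
        struct custom *iter;

        /* If gcc padded the definitions beyond __alignof__(struct custom),
         * this walk steps into the padding "holes" between entries. */
        for (iter = __start___custom; iter < __stop___custom; iter++)
                printf("field=%lu\n", iter->field);
        return 0;
}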
This patch introduces __u64_aligned to force gcc to use the u64 alignment as
both the type and the variable alignment, up-aligning or down-aligning the
target type if necessary. Memory accesses to the target structure remain
efficient (no bytewise memory accesses are required) and the atomic pointer
update guarantees required by RCU are kept. u64 is considered the largest type
that can generate a trap for unaligned accesses (on sparc32, u64 must be
aligned on a 64-bit boundary).
This alignment should be used for both structure definitions and declarations
(as *both* the type and the variable attribute) when using the "section"
attribute to generate arrays of structures. Given that gcc only uses the type
attribute "aligned" as a lower bound for alignment, the structures should not
contain types which require alignment larger than that of u64. The "aligned"
variable attribute, on the other hand, forces gcc to use exactly the specified
alignment.
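The difference between the two attribute positions matters; a small sketch of
how they compose (hypothetical struct, mirroring the header's definition of
the macro):

#define __u64_aligned __attribute__((__aligned__(__alignof__(long long))))

/* Type attribute: a lower bound, so instances and arrays of the type are
 * at least 8-byte aligned and sizeof() is padded to a multiple of 8. */
struct custom {
        int counter;
        void *ptr;
} __u64_aligned;

/* Variable attribute: forces exactly this alignment on the definition,
 * overriding any larger alignment gcc would otherwise pick for statics. */
static struct custom __u64_aligned instance;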
Also introduce the U64_ALIGN() linker script macro to specify a custom section
alignment that matches that of __u64_aligned.
Changelog since v2:
- Drop the "packed" type attribute, because it causes gcc to drop the padding
between consecutive "int" and "pointer"/"long" fields, which leads to
unaligned accesses on sparc64.
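An illustrative (non-kernel) sketch of why "packed" had to go: it removes
exactly the padding that keeps 64-bit members naturally aligned.

#include <stddef.h>

/* v2 combined "packed" with the alignment attribute; packed removes the
 * padding between the int and the pointer, placing the pointer at offset 4. */
struct evil {
        int type;         /* offset 0 */
        const char *name; /* offset 4 instead of 8 */
} __attribute__((__packed__, __aligned__(8)));

/* On sparc64 a 64-bit load from a 4-byte-aligned address traps, so any
 * dereference of ->name in such an entry would fault. */
_Static_assert(offsetof(struct evil, name) == 4,
               "packed removed the padding before the pointer");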
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
CC: David Miller <davem@davemloft.net>
CC: Steven Rostedt <rostedt@goodmis.org>
CC: Frederic Weisbecker <fweisbec@gmail.com>
CC: Ingo Molnar <mingo@elte.hu>
---
include/asm-generic/vmlinux.lds.h | 6 ++++
include/linux/align-section.h | 54 ++++++++++++++++++++++++++++++++++++++
include/linux/compiler.h | 2 +
3 files changed, 62 insertions(+)
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -57,6 +57,8 @@ extern void __chk_io_ptr(const volatile
# include <linux/compiler-intel.h>
#endif
+#include <linux/align-section.h>
+
/*
* Generic compiler-dependent macros required for kernel
* build go below this comment. Actual compiler/compiler version
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -69,6 +69,12 @@
*/
#define STRUCT_ALIGN() . = ALIGN(32)
+/*
+ * Align to an 8-byte boundary. For use with custom sections made from
+ * structures declared and defined with __u64_aligned.
+ */
+#define U64_ALIGN() . = ALIGN(8)
+
/* The actual configuration determine if the init/exit sections
* are handled as text/data or they can be discarded (which
* often happens at runtime)
--- /dev/null
+++ b/include/linux/align-section.h
@@ -0,0 +1,54 @@
+#ifndef _LINUX_ALIGN_SECTION_H
+#define _LINUX_ALIGN_SECTION_H
+
+/*
+ * __u64_aligned:
+ *
+ * __u64_aligned should be used as type and variable attribute for structure
+ * definitions when using the "section" attribute to generate arrays of
+ * structures. U64_ALIGN() must be used prior to these section definitions in
+ * the linker script.
+ *
+ * It forces the compiler to use the u64 type alignment, up-aligning or
+ * down-aligning the target type if necessary. Memory accesses to the target
+ * structure remain efficient (no bytewise memory accesses are required) and
+ * the atomic pointer update guarantees required by RCU are kept. u64 is
+ * considered the largest type that can generate a trap for unaligned accesses
+ * (on sparc32, u64 must be aligned on a 64-bit boundary).
+ *
+ * Given that gcc only uses the type attribute "aligned" as a lower bound for
+ * alignment, the structures should not contain types which require alignment
+ * larger than that of u64. The "aligned" variable attribute, on the other hand,
+ * forces gcc to use exactly the specified alignment.
+ */
+
+/*
+ * Use __u64_aligned as the type and variable attribute for custom section
+ * structure declarations and definitions. It should also be applied to any
+ * static or extern declaration of the structure that would override the
+ * definition to which the "section" attribute is applied, e.g.:
+ *
+ * struct custom {
+ * unsigned long field;
+ * ...
+ * } __u64_aligned;
+ *
+ * extern struct custom __u64_aligned identifier;
+ * struct custom __u64_aligned __attribute__((section("__custom"))) identifier;
+ *
+ * The array can then be defined with:
+ *
+ * extern struct custom __start___custom[];
+ * extern struct custom __stop___custom[];
+ *
+ * With linking performed by the linker script:
+ *
+ * U64_ALIGN();
+ * VMLINUX_SYMBOL(__start___custom) = .;
+ * *(__custom)
+ * VMLINUX_SYMBOL(__stop___custom) = .;
+ */
+
+#define __u64_aligned __attribute__((__aligned__(__alignof__(long long))))
+
+#endif /* _LINUX_ALIGN_SECTION_H */

debian/patches/bugfix/sparc/tracing-use-u64-aligned-as-type-and-variable-attribute.patch

@@ -0,0 +1,283 @@
From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Subject: tracing: fix sparc64 alignment crash with __u64_aligned/U64_ALIGN()
Date: Fri, 21 Jan 2011 15:36:32 -0500
Problem description:
gcc happily aligns statically defined structures on 32-byte boundaries. Ftrace
trace events and Tracepoints both statically define structures in sections
(using the "section" attribute), then use the linker scripts to assign symbols
to these sections so they can be iterated as arrays.

However, gcc uses different alignments for these structures when they are
defined statically than when they are globally visible and/or in an array.
Iteration over these arrays therefore sees "holes" of padding.
Use __u64_aligned for type declarations and variable definitions to ensure
that gcc:
a) iterates on the correctly aligned type (type attribute);
b) generates the definitions within the sections with the appropriate
   alignment (variable attribute).
The Ftrace code introduced the "aligned(4)" variable attribute in commit
1473e4417c79f12d91ef91a469699bfa911f510f to work around this problem when it
showed up on x86_64, but that attribute causes unaligned accesses on sparc64
and is generally a bad idea on 64-bit if RCU pointers are contained within the
structure. Moreover, it was not also applied as a type attribute, which could
cause the iteration over the extern array not to match the variable
definitions for some structure sizes.
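Condensed, the mismatch looks like this (stand-in struct; the real one is
struct ftrace_event_call, whose embedded pointers give it a natural alignment
of 8 on 64-bit, and the start/stop symbols come from the kernel linker script,
so this is not a standalone program):

struct event {                    /* stand-in for struct ftrace_event_call */
        const char *name;         /* 8-byte member on 64-bit */
        void *data;
};

/* The definition is pinned to a 4-byte slot in the section... */
static struct event
        __attribute__((__aligned__(4)))
        __attribute__((section("_ftrace_events"), used))
        event_demo = { .name = "demo" };

/* ...but the consumer iterates with the type's natural alignment, and on
 * sparc64 the resulting 8-byte loads from 4-byte-aligned entries trap.
 * (Provided by the linker script around *(_ftrace_events).) */
extern struct event __start_ftrace_events[];
extern struct event __stop_ftrace_events[];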
We should also ensure proper alignment of each Ftrace section in
include/asm-generic/vmlinux.lds.h. Move the STRUCT_ALIGN() for FTRACE_EVENTS()
and TRACE_SYSCALLS() into those definitions, so the alignment is only emitted
when these infrastructures are configured in, and use U64_ALIGN() instead of
STRUCT_ALIGN().
Also align TRACE_PRINTKS on U64_ALIGN() to make sure the beginning of the
section is aligned on the pointer size.
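The bprintk format section is just an array of string pointers, so its start
must be pointer-aligned. Roughly, the consumer (in kernel/trace) does the
equivalent of this condensed sketch (not the literal kernel code):

#include <linux/kernel.h>

/* Provided by the linker script around *(__trace_printk_fmt). */
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

static void walk_formats(void)
{
        const char **fmt;

        /* Each entry is a pointer; if the section start is not aligned
         * on the pointer size, every one of these loads is misaligned. */
        for (fmt = __start___trace_bprintk_fmt;
             fmt < __stop___trace_bprintk_fmt; fmt++)
                printk(KERN_INFO "%s\n", *fmt);
}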
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
CC: David Miller <davem@davemloft.net>
CC: Steven Rostedt <rostedt@goodmis.org>
CC: Frederic Weisbecker <fweisbec@gmail.com>
CC: Ingo Molnar <mingo@elte.hu>
---
include/asm-generic/vmlinux.lds.h | 19 ++++++++++---------
include/linux/compiler.h | 6 +++---
include/linux/ftrace_event.h | 2 +-
include/linux/syscalls.h | 18 ++++++++----------
include/trace/ftrace.h | 8 ++++----
include/trace/syscall.h | 2 +-
kernel/trace/trace.h | 2 +-
kernel/trace/trace_export.c | 2 +-
8 files changed, 29 insertions(+), 30 deletions(-)
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -80,7 +80,7 @@ struct ftrace_branch_data {
};
unsigned long miss_hit[2];
};
-};
+} __u64_aligned;
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
@@ -96,7 +96,7 @@ void ftrace_likely_update(struct ftrace_
#define __branch_check__(x, expect) ({ \
int ______r; \
static struct ftrace_branch_data \
- __attribute__((__aligned__(4))) \
+ __u64_aligned \
__attribute__((section("_ftrace_annotated_branch"))) \
______f = { \
.func = __func__, \
@@ -131,7 +131,7 @@ void ftrace_likely_update(struct ftrace_
({ \
int ______r; \
static struct ftrace_branch_data \
- __attribute__((__aligned__(4))) \
+ __u64_aligned \
__attribute__((section("_ftrace_branch"))) \
______f = { \
.func = __func__, \
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -126,12 +126,11 @@ extern struct trace_event_functions exit
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
static struct syscall_metadata \
- __attribute__((__aligned__(4))) __syscall_meta_##sname; \
+ __u64_aligned __syscall_meta_##sname; \
static struct ftrace_event_call \
- __attribute__((__aligned__(4))) event_enter_##sname; \
+ __u64_aligned event_enter_##sname; \
static struct ftrace_event_call __used \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_events"))) \
+ __u64_aligned __attribute__((section("_ftrace_events"))) \
event_enter_##sname = { \
.name = "sys_enter"#sname, \
.class = &event_class_syscall_enter, \
@@ -141,12 +140,11 @@ extern struct trace_event_functions exit
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static struct syscall_metadata \
- __attribute__((__aligned__(4))) __syscall_meta_##sname; \
+ __u64_aligned __syscall_meta_##sname; \
static struct ftrace_event_call \
- __attribute__((__aligned__(4))) event_exit_##sname; \
+ __u64_aligned event_exit_##sname; \
static struct ftrace_event_call __used \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_events"))) \
+ __u64_aligned __attribute__((section("_ftrace_events"))) \
event_exit_##sname = { \
.name = "sys_exit"#sname, \
.class = &event_class_syscall_exit, \
@@ -158,7 +156,7 @@ extern struct trace_event_functions exit
SYSCALL_TRACE_ENTER_EVENT(sname); \
SYSCALL_TRACE_EXIT_EVENT(sname); \
static struct syscall_metadata __used \
- __attribute__((__aligned__(4))) \
+ __u64_aligned \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta_##sname = { \
.name = "sys"#sname, \
@@ -174,7 +172,7 @@ extern struct trace_event_functions exit
SYSCALL_TRACE_ENTER_EVENT(_##sname); \
SYSCALL_TRACE_EXIT_EVENT(_##sname); \
static struct syscall_metadata __used \
- __attribute__((__aligned__(4))) \
+ __u64_aligned \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta__##sname = { \
.name = "sys_"#sname, \
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -69,7 +69,7 @@
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static struct ftrace_event_call __used \
- __attribute__((__aligned__(4))) event_##name
+ __u64_aligned event_##name;
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
@@ -434,7 +434,7 @@ static inline notrace int ftrace_get_off
* };
*
* static struct ftrace_event_call __used
- * __attribute__((__aligned__(4)))
+ * __u64_aligned
* __attribute__((section("_ftrace_events"))) event_<call> = {
* .name = "<call>",
* .class = event_class_<template>,
@@ -567,7 +567,7 @@ static struct ftrace_event_class __used
#define DEFINE_EVENT(template, call, proto, args) \
\
static struct ftrace_event_call __used \
-__attribute__((__aligned__(4))) \
+__u64_aligned \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.class = &event_class_##template, \
@@ -581,7 +581,7 @@ __attribute__((section("_ftrace_events")
static const char print_fmt_##call[] = print; \
\
static struct ftrace_event_call __used \
-__attribute__((__aligned__(4))) \
+__u64_aligned \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.class = &event_class_##template, \
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -749,7 +749,7 @@ extern const char *__stop___trace_bprint
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
extern struct ftrace_event_call \
- __attribute__((__aligned__(4))) event_##call;
+ __u64_aligned event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -156,7 +156,7 @@ struct ftrace_event_class event_class_ft
}; \
\
struct ftrace_event_call __used \
-__attribute__((__aligned__(4))) \
+__u64_aligned \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.event.type = etype, \
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -194,7 +194,7 @@ struct ftrace_event_call {
int perf_refcount;
struct hlist_head __percpu *perf_events;
#endif
-};
+} __u64_aligned;
#define PERF_MAX_TRACE_SIZE 2048
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -29,7 +29,7 @@ struct syscall_metadata {
struct ftrace_event_call *enter_event;
struct ftrace_event_call *exit_event;
-};
+} __u64_aligned;
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long arch_syscall_addr(int nr);
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -113,7 +113,8 @@
#endif
#ifdef CONFIG_TRACE_BRANCH_PROFILING
-#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
+#define LIKELY_PROFILE() U64_ALIGN(); \
+ VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
*(_ftrace_annotated_branch) \
VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
@@ -121,7 +122,8 @@
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
-#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
+#define BRANCH_PROFILE() U64_ALIGN(); \
+ VMLINUX_SYMBOL(__start_branch_profile) = .; \
*(_ftrace_branch) \
VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
@@ -129,7 +131,8 @@
#endif
#ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+#define FTRACE_EVENTS() U64_ALIGN(); \
+ VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
@@ -137,7 +140,8 @@
#endif
#ifdef CONFIG_TRACING
-#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+#define TRACE_PRINTKS() U64_ALIGN(); \
+ VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
*(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
@@ -145,7 +149,8 @@
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+#define TRACE_SYSCALLS() U64_ALIGN(); \
+ VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
*(__syscalls_metadata) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
@@ -175,11 +180,7 @@
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- \
- STRUCT_ALIGN(); \
FTRACE_EVENTS() \
- \
- STRUCT_ALIGN(); \
TRACE_SYSCALLS()
/*

debian/patches/series

@@ -3,3 +3,5 @@
+ bugfix/all/r8169-keep-firmware-in-memory.patch
- features/all/r8712u-Fix-external-firmware-loading.patch
+ features/all/r8712u-Firmware-changes-for-driver.patch
+ bugfix/sparc/introduce-u64-aligned.patch
+ bugfix/sparc/tracing-use-u64-aligned-as-type-and-variable-attribute.patch