author	Douglas Raillard <douglas.raillard@arm.com>	2021-10-28 13:27:10 +0100
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2021-10-28 10:22:17 -0300
commit	696c62180455fd291cd12086eeeee4ff400fbd86 (patch)
tree	943620ea7aa0a634383c66aa2d675499e8810754
parent	48f4086b766d22145f1ed6f60b782881b6871945 (diff)
btf_loader: Use cacheline size to infer alignment
When the alignment is larger than natural, it is very likely that the
source code was using the cacheline size. Therefore, use the cacheline
size when it would only result in increasing the alignment.

Committer tests:

This is one of the cases in which this heuristic works well, 'struct
Qdisc' in the Linux kernel:

  --- /tmp/btfdiff.dwarf.pXdgRU	2021-10-28 10:22:11.738200232 -0300
  +++ /tmp/btfdiff.btf.bkDkdf	2021-10-28 10:22:11.925205061 -0300
  @@ -107,7 +107,7 @@ struct Qdisc {
  	/* XXX 24 bytes hole, try to pack */

  	/* --- cacheline 2 boundary (128 bytes) --- */
  -	struct sk_buff_head gso_skb __attribute__((__aligned__(64))); /* 128 24 */
  +	struct sk_buff_head gso_skb __attribute__((__aligned__(32))); /* 128 24 */
  	struct qdisc_skb_head q;                        /* 152 24 */
  	struct gnet_stats_basic_packed bstats;          /* 176 16 */
  	/* --- cacheline 3 boundary (192 bytes) --- */

With this patch both DWARF and BTF generated output have the same
alignment.

Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Douglas Raillard <douglas.raillard@arm.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: dwarves@vger.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
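To make the heuristic concrete, here is a minimal, self-contained C
sketch of the inference step described above. It is not pahole's actual
code: the function name infer_member_alignment, the hard-coded 64-byte
cacheline size, and the natural alignment of 8 used in the example are
assumptions for illustration.

  #include <stdint.h>
  #include <stdio.h>

  /* Sketch: bump an alignment inferred from member offsets up to the
   * cacheline size when the member offset is itself cacheline aligned
   * and the bump can only increase the alignment. */
  static uint32_t infer_member_alignment(uint32_t byte_offset,
  				       uint32_t natural_alignment,
  				       uint32_t inferred)
  {
  	const uint32_t cacheline_size = 64; /* assumed; pahole reads it from its config */

  	/* The natural alignment already explains the offset: no
  	 * explicit alignment attribute is needed. */
  	if (inferred <= natural_alignment || inferred == 1)
  		return 0;

  	/* The offset is also compatible with cacheline alignment and
  	 * the cacheline size is a multiple of the inferred value, so
  	 * prefer the cacheline size: it is safe and likely what the
  	 * source used (e.g. ____cacheline_aligned). */
  	if (inferred < cacheline_size &&
  	    cacheline_size % inferred == 0 &&
  	    byte_offset % cacheline_size == 0)
  		return cacheline_size;

  	return inferred;
  }

  int main(void)
  {
  	/* 'gso_skb' from the diff above: byte offset 128, natural
  	 * alignment assumed 8, alignment inferred from offsets alone
  	 * is 32 (the value BTF reported before this patch). */
  	printf("%u\n", infer_member_alignment(128, 8, 32)); /* prints 64 */
  	return 0;
  }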
 btf_loader.c | 10 ++++++++++
 1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/btf_loader.c b/btf_loader.c
index e500eaee..7a5b16ff 100644
--- a/btf_loader.c
+++ b/btf_loader.c
@@ -476,6 +476,7 @@ static uint32_t class__infer_alignment(const struct conf_load *conf,
 					uint32_t natural_alignment,
 					uint32_t smallest_offset)
 {
+	uint16_t cacheline_size = conf->conf_fprintf->cacheline_size;
 	uint32_t alignment = 0;
 	uint32_t offset_delta = byte_offset - smallest_offset;
@@ -494,6 +495,15 @@ static uint32_t class__infer_alignment(const struct conf_load *conf,
 	/* Natural alignment, nothing to do */
 	if (alignment <= natural_alignment || alignment == 1)
 		alignment = 0;
+	/* If the offset is compatible with being aligned on the cacheline size
+	 * and this would only result in increasing the alignment, use the
+	 * cacheline size as it is safe and quite likely to be what was in the
+	 * source.
+	 */
+	else if (alignment < cacheline_size &&
+		 cacheline_size % alignment == 0 &&
+		 byte_offset % cacheline_size == 0)
+		alignment = cacheline_size;
 	return alignment;
 }
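Applied to the 'gso_skb' case from the commit message (and assuming a
64-byte cacheline): BTF infers an alignment of 32 from the member
offsets, the member sits at byte offset 128, and 32 < 64,
64 % 32 == 0 and 128 % 64 == 0 all hold, so the new branch bumps the
alignment to 64, matching the __aligned__(64) that DWARF reports. The
two modulo checks keep the guess conservative: the cacheline size is
only used when it is a multiple of the inferred alignment and the
offset is itself cacheline aligned, so the bump can never contradict
the observed layout.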