ring_buffer refcount overflow false positive
Posted: Thu Apr 02, 2015 10:54 am
I think I found a false-positive case of refcount overflow that is not covered by either the 3.2 or the 3.19 patch; we hit it on our system during long-running stability testing.
This is on an ARM 3.4 kernel, but it probably applies to all arch variants.
This is in kernel/trace/ring_buffer.c
Code:
[296116.036693] PAX: refcount overflow occured at: rb_reserve_next_event+0x3c/0x3c8
Code:
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
                      struct ring_buffer_per_cpu *cpu_buffer,
                      unsigned long length)
{
        struct ring_buffer_event *event;
        u64 ts, delta;
        int nr_loops = 0;
        int add_timestamp;
        u64 diff;

        rb_start_commit(cpu_buffer);
        ...
}

static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
        local_inc(&cpu_buffer->committing);
        local_inc(&cpu_buffer->commits);
}

static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
        unsigned long commits;

        if (RB_WARN_ON(cpu_buffer,
                       !local_read(&cpu_buffer->committing)))
                return;

 again:
        commits = local_read_unchecked(&cpu_buffer->commits);
        /* synchronize with interrupts */
        barrier();
        if (local_read(&cpu_buffer->committing) == 1)
                rb_set_commit_to_write(cpu_buffer);

        local_dec(&cpu_buffer->committing);

        /* synchronize with interrupts */
        barrier();

        /*
         * Need to account for interrupts coming in between the
         * updating of the commit page and the clearing of the
         * committing counter.
         */
        if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
            !local_read(&cpu_buffer->committing)) {
                local_inc(&cpu_buffer->committing);
                goto again;
        }
}
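From the code above, the two counters have different semantics: committing is balanced (incremented in rb_start_commit() and decremented in rb_end_commit()), so refcount-style overflow checking makes sense for it, while commits is a monotonic count that is only ever incremented. Note that rb_end_commit() already reads it via local_read_unchecked(). Here is a sketch of how I would expect the fields to be declared, assuming the local_unchecked_t type from the PaX patches (whether the 3.4 backport actually declares commits this way is exactly what is in question):

Code:
struct ring_buffer_per_cpu {
        ...
        local_t                 committing;     /* inc/dec balanced, refcount-like: checked */
        local_unchecked_t       commits;        /* only ever incremented: unchecked */
        ...
};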
The reported offset 0x3c (+60) lands on the bkpt 0xf103 of the second increment, i.e. the local_inc(&cpu_buffer->commits) part, and that counter only ever increments:
Code:
(gdb) list *(rb_reserve_next_event+0x3c)
0x2b24 is in rb_reserve_next_event (/<mask>/kernel/arch/arm/include/asm/atomic.h:62).
57 static inline void atomic_add(int i, atomic_t *v)
58 {
59 unsigned long tmp;
60 int result;
61
62 __asm__ __volatile__("@ atomic_add\n"
63 "1: ldrex %1, [%3]\n"
64 " adds %0, %1, %4\n"
65
66 #ifdef CONFIG_PAX_REFCOUNT
Code:
(gdb) disassemble rb_reserve_next_event
Dump of assembler code for function rb_reserve_next_event:
0x00002ae8 <+0>: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
0x00002aec <+4>: mov r10, r0
0x00002af0 <+8>: sub sp, sp, #60 ; 0x3c
0x00002af4 <+12>: add r8, r0, #76 ; 0x4c
0x00002af8 <+16>: ldrex r2, [r8]
0x00002afc <+20>: adds r3, r2, #1
0x00002b00 <+24>: bvc 0x2b08 <rb_reserve_next_event+32>
0x00002b04 <+28>: bkpt 0xf103
0x00002b08 <+32>: strex r2, r3, [r8]
0x00002b0c <+36>: teq r2, #0
0x00002b10 <+40>: bne 0x2af8 <rb_reserve_next_event+16>
0x00002b14 <+44>: add r3, r0, #80 ; 0x50
0x00002b18 <+48>: ldrex r0, [r3]
0x00002b1c <+52>: adds r2, r0, #1
0x00002b20 <+56>: bvc 0x2b28 <rb_reserve_next_event+64>
0x00002b24 <+60>: bkpt 0xf103
0x00002b28 <+64>: strex r0, r2, [r3]
0x00002b2c <+68>: teq r0, #0
0x00002b30 <+72>: bne 0x2b18 <rb_reserve_next_event+48>
0x00002b34 <+76>: cmp r1, #0
0x00002b38 <+80>: moveq r1, #1
0x00002b3c <+84>: beq 0x2b48 <rb_reserve_next_event+96>
0x00002b40 <+88>: cmp r1, #112 ; 0x70
...
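In the instrumented sequence, the adds sets the overflow (V) flag, the bvc skips ahead when no signed overflow occurred, and the bkpt 0xf103 that fires on overflow is what produces the PAX report. Since commits only ever counts up, it is bound to wrap eventually and trip the check even though nothing refcount-like is being corrupted.

If I read this right, the fix is the same annotation the patches already apply to other monotonic counters. Here is a sketch of what I would expect rb_start_commit() to look like, assuming local_inc_unchecked() is available in the 3.4 patch as it is in the later ones (my reading, not a confirmed fix):

Code:
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
        local_inc(&cpu_buffer->committing);
        /* monotonic counter, not a refcount: skip the overflow check */
        local_inc_unchecked(&cpu_buffer->commits);
}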