/*
 * ring_buffer_frontend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 *
 * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
 * recorder (overwrite) modes. See thesis:
 *
 * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
 * dissertation, Ecole Polytechnique de Montreal.
 * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
 *
 * - Algorithm presentation in Chapter 5:
 *   "Lockless Multi-Core High-Throughput Buffering".
 * - Algorithm formal verification in Section 8.6:
 *   "Formal verification of LTTng"
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Inspired from LTT and RelayFS:
 *  Karim Yaghmour <karim@opersys.com>
 *  Tom Zanussi <zanussi@us.ibm.com>
 *  Bob Wisniewski <bob@watson.ibm.com>
 * And from K42 :
 *  Bob Wisniewski <bob@watson.ibm.com>
 *
 * Buffer reader semantic :
 *
 * - get_subbuf_size
 * while buffer is not finalized and empty
 *   - get_subbuf
 *     - if return value != 0, continue
 *   - splice one subbuffer worth of data to a pipe
 *   - splice the data from pipe to disk/network
 *   - put_subbuf
 */
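
/*
 * Illustrative sketch of the reader semantic above (not part of this file's
 * API; the splice step is represented by a hypothetical splice_subbuf()
 * helper and error handling is omitted):
 *
 *	while (!finalized || !buffer_empty) {
 *		if (lib_ring_buffer_get_subbuf(buf, consumed, handle) != 0)
 *			continue;	(no complete subbuffer yet, retry)
 *		splice_subbuf(buf, handle);	(to a pipe, then to disk/network)
 *		lib_ring_buffer_put_subbuf(buf, handle);
 *	}
 */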

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <urcu/compiler.h>
#include <urcu/ref.h>
#include <urcu/tls-compat.h>
#include <poll.h>
#include <helper.h>

#include "smp.h"
#include <lttng/ringbuffer-config.h>
#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "shm.h"
#include "tlsfixup.h"
#include "../liblttng-ust/compat.h"	/* For ENODATA */

/* Print DBG() messages about events lost only every 1048576 hits */
#define DBG_PRINT_NR_LOST (1UL << 20)

#define LTTNG_UST_RB_SIG_FLUSH		SIGRTMIN
#define LTTNG_UST_RB_SIG_READ		SIGRTMIN + 1
#define LTTNG_UST_RB_SIG_TEARDOWN	SIGRTMIN + 2
#define CLOCKID				CLOCK_MONOTONIC
#define LTTNG_UST_RING_BUFFER_GET_RETRY		10
#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS	10

/*
 * Non-static to ensure the compiler does not optimize away the xor.
 */
uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
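
/*
 * Note (editorial, inferred from the code below): the array above holds the
 * crash-dump magic pre-XORed with 0xFF; init_crash_abi() XORs each byte with
 * 0xFF again at run time, so the plain magic value appears only in the mapped
 * buffers, never verbatim in the library image, which is presumably why the
 * xor must not be optimized away.
 */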

/*
 * Use POSIX SHM: shm_open(3) and shm_unlink(3).
 * close(2) to close the fd returned by shm_open.
 * shm_unlink releases the shared memory object name.
 * ftruncate(2) sets the size of the memory object.
 * mmap/munmap maps the shared memory obj to a virtual address in the
 * calling process (should be done both in libust and consumer).
 * See shm_overview(7) for details.
 * Pass file descriptor returned by shm_open(3) to ltt-sessiond through
 * a UNIX socket.
 *
 * Since we don't need to access the object using its name, we can
 * immediately shm_unlink(3) it, and only keep the handle with its file
 * descriptor.
 */
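
/*
 * Minimal sketch of the POSIX SHM sequence described above (illustrative
 * only; the object name, size and error handling are placeholders, not
 * values used by this library):
 *
 *	int fd = shm_open("/example-shm", O_CREAT | O_EXCL | O_RDWR, 0700);
 *	if (fd < 0)
 *		return -1;
 *	(void) shm_unlink("/example-shm");	(name no longer needed, keep only the fd)
 *	if (ftruncate(fd, memory_map_size) < 0)
 *		return -1;
 *	void *map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	(the fd is then passed to the consumer over a UNIX socket)
 */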

/*
 * Internal structure representing offsets to use at a sub-buffer switch.
 */
struct switch_offsets {
	unsigned long begin, end, old;
	size_t pre_header_padding, size;
	unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
		     switch_old_end:1;
};

DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);

/*
 * wakeup_fd_mutex protects wakeup fd use by timer from concurrent
 * close.
 */
static pthread_mutex_t wakeup_fd_mutex = PTHREAD_MUTEX_INITIALIZER;

static
void lib_ring_buffer_print_errors(struct channel *chan,
		struct lttng_ust_lib_ring_buffer *buf, int cpu,
		struct lttng_ust_shm_handle *handle);

/*
 * Handle timer teardown race wrt memory free of private data:
 * ring buffer signals are handled by a single thread, which permits
 * a synchronization point between the handling of each signal.
 * Protected by the lock within the structure.
 */
struct timer_signal_data {
	pthread_t tid;	/* thread id managing signals */
	int setup_done;
	int qs_done;
	pthread_mutex_t lock;
};

static struct timer_signal_data timer_signal = {
	.tid = 0,
	.setup_done = 0,
	.qs_done = 0,
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/**
 * lib_ring_buffer_reset - Reset ring buffer to initial values.
 * @buf: Ring buffer.
 *
 * Effectively empty the ring buffer. Should be called when the buffer is not
 * used for writing. The ring buffer can be opened for reading, but the reader
 * should not be using the iterator concurrently with reset. The previous
 * current iterator record is reset.
 */
void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
			   struct lttng_ust_shm_handle *handle)
{
	struct channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned int i;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		abort();
	config = &chan->backend.config;
	/*
	 * Reset iterator first. It will put the subbuffer if it currently holds
	 * it.
	 */
	v_set(config, &buf->offset, 0);
	for (i = 0; i < chan->backend.num_subbuf; i++) {
		v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
		v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
		v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
	}
	uatomic_set(&buf->consumed, 0);
	uatomic_set(&buf->record_disabled, 0);
	v_set(config, &buf->last_tsc, 0);
	lib_ring_buffer_backend_reset(&buf->backend, handle);
	/* Don't reset number of active readers */
	v_set(config, &buf->records_lost_full, 0);
	v_set(config, &buf->records_lost_wrap, 0);
	v_set(config, &buf->records_lost_big, 0);
	v_set(config, &buf->records_count, 0);
	v_set(config, &buf->records_overrun, 0);
	buf->finalized = 0;
}

/**
 * channel_reset - Reset channel to initial values.
 * @chan: Channel.
 *
 * Effectively empty the channel. Should be called when the channel is not used
 * for writing. The channel can be opened for reading, but the reader should not
 * be using the iterator concurrently with reset. The previous current iterator
 * record is reset.
 */
void channel_reset(struct channel *chan)
{
	/*
	 * Reset iterators first. Will put the subbuffer if held for reading.
	 */
	uatomic_set(&chan->record_disabled, 0);
	/* Don't reset commit_count_mask, still valid */
	channel_backend_reset(&chan->backend);
	/* Don't reset switch/read timer interval */
	/* Don't reset notifiers and notifier enable bits */
	/* Don't reset reader reference count */
}

static
void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_crash_abi *crash_abi,
		struct lttng_ust_lib_ring_buffer *buf,
		struct channel_backend *chanb,
		struct shm_object *shmobj,
		struct lttng_ust_shm_handle *handle)
{
	int i;

	for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
		crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF;
	crash_abi->mmap_length = shmobj->memory_map_size;
	crash_abi->endian = RB_CRASH_ENDIAN;
	crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR;
	crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR;
	crash_abi->word_size = sizeof(unsigned long);
	crash_abi->layout_type = LTTNG_CRASH_TYPE_UST;

	/* Offset of fields */
	crash_abi->offset.prod_offset =
		(uint32_t) ((char *) &buf->offset - (char *) buf);
	crash_abi->offset.consumed_offset =
		(uint32_t) ((char *) &buf->consumed - (char *) buf);
	crash_abi->offset.commit_hot_array =
		(uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf);
	crash_abi->offset.commit_hot_seq =
		offsetof(struct commit_counters_hot, seq);
	crash_abi->offset.buf_wsb_array =
		(uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
	crash_abi->offset.buf_wsb_id =
		offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
	crash_abi->offset.sb_array =
		(uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
	crash_abi->offset.sb_array_shmp_offset =
		offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
			shmp._ref.offset);
	crash_abi->offset.sb_backend_p_offset =
		offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
			p._ref.offset);

	/* Field length */
	crash_abi->length.prod_offset = sizeof(buf->offset);
	crash_abi->length.consumed_offset = sizeof(buf->consumed);
	crash_abi->length.commit_hot_seq =
		sizeof(((struct commit_counters_hot *) NULL)->seq);
	crash_abi->length.buf_wsb_id =
		sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
	crash_abi->length.sb_array_shmp_offset =
		sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
	crash_abi->length.sb_backend_p_offset =
		sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);

	/* Array stride */
	crash_abi->stride.commit_hot_array =
		sizeof(struct commit_counters_hot);
	crash_abi->stride.buf_wsb_array =
		sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
	crash_abi->stride.sb_array =
		sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);

	/* Buffer constants */
	crash_abi->buf_size = chanb->buf_size;
	crash_abi->subbuf_size = chanb->subbuf_size;
	crash_abi->num_subbuf = chanb->num_subbuf;
	crash_abi->mode = (uint32_t) chanb->config.mode;

	if (config->cb.content_size_field) {
		size_t offset, length;

		config->cb.content_size_field(config, &offset, &length);
		crash_abi->offset.content_size = offset;
		crash_abi->length.content_size = length;
	} else {
		crash_abi->offset.content_size = 0;
		crash_abi->length.content_size = 0;
	}
	if (config->cb.packet_size_field) {
		size_t offset, length;

		config->cb.packet_size_field(config, &offset, &length);
		crash_abi->offset.packet_size = offset;
		crash_abi->length.packet_size = length;
	} else {
		crash_abi->offset.packet_size = 0;
		crash_abi->length.packet_size = 0;
	}
}

/*
 * Must be called under cpu hotplug protection.
 */
int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
			   struct channel_backend *chanb, int cpu,
			   struct lttng_ust_shm_handle *handle,
			   struct shm_object *shmobj)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	void *priv = channel_get_private(chan);
	size_t subbuf_header_size;
	uint64_t tsc;
	int ret;

	/* Test for cpu hotplug */
	if (buf->backend.allocated)
		return 0;

	align_shm(shmobj, __alignof__(struct commit_counters_hot));
	set_shmp(buf->commit_hot,
		 zalloc_shm(shmobj,
			sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
	if (!shmp(handle, buf->commit_hot)) {
		return -ENOMEM;
	}

	align_shm(shmobj, __alignof__(struct commit_counters_cold));
	set_shmp(buf->commit_cold,
		 zalloc_shm(shmobj,
			sizeof(struct commit_counters_cold) * chan->backend.num_subbuf));
	if (!shmp(handle, buf->commit_cold)) {
		ret = -ENOMEM;
		goto free_commit;
	}

	ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
			cpu, handle, shmobj);
	if (ret) {
		goto free_init;
	}

	/*
	 * Write the subbuffer header for first subbuffer so we know the total
	 * duration of data gathering.
	 */
	subbuf_header_size = config->cb.subbuffer_header_size();
	v_set(config, &buf->offset, subbuf_header_size);
	subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
	tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
	config->cb.buffer_begin(buf, tsc, 0, handle);
	v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
	v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->seq);

	if (config->cb.buffer_create) {
		ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
		if (ret)
			goto free_chanbuf;
	}

	init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle);

	buf->backend.allocated = 1;
	return 0;

	/* Error handling */
free_init:
	/* commit_cold will be freed by shm teardown */
free_commit:
	/* commit_hot will be freed by shm teardown */
free_chanbuf:
	return ret;
}

static
void lib_ring_buffer_channel_switch_timer(int sig, siginfo_t *si, void *uc)
{
	const struct lttng_ust_lib_ring_buffer_config *config;
	struct lttng_ust_shm_handle *handle;
	struct channel *chan;
	int cpu;

	assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());

	chan = si->si_value.sival_ptr;
	handle = chan->handle;
	config = &chan->backend.config;

	DBG("Switch timer for channel %p\n", chan);

	/*
	 * Only flush buffers periodically if readers are active.
	 */
	pthread_mutex_lock(&wakeup_fd_mutex);
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(cpu) {
			struct lttng_ust_lib_ring_buffer *buf =
				shmp(handle, chan->backend.buf[cpu].shmp);

			if (!buf)
				abort();
			if (uatomic_read(&buf->active_readers))
				lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
					chan->handle);
		}
	} else {
		struct lttng_ust_lib_ring_buffer *buf =
			shmp(handle, chan->backend.buf[0].shmp);

		if (!buf)
			abort();
		if (uatomic_read(&buf->active_readers))
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
				chan->handle);
	}
	pthread_mutex_unlock(&wakeup_fd_mutex);
	return;
}

static
void lib_ring_buffer_channel_do_read(struct channel *chan)
{
	const struct lttng_ust_lib_ring_buffer_config *config;
	struct lttng_ust_shm_handle *handle;
	int cpu;

	handle = chan->handle;
	config = &chan->backend.config;

	/*
	 * Only flush buffers periodically if readers are active.
	 */
	pthread_mutex_lock(&wakeup_fd_mutex);
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(cpu) {
			struct lttng_ust_lib_ring_buffer *buf =
				shmp(handle, chan->backend.buf[cpu].shmp);

			if (!buf)
				abort();
			if (uatomic_read(&buf->active_readers)
			    && lib_ring_buffer_poll_deliver(config, buf,
					chan, handle)) {
				lib_ring_buffer_wakeup(buf, handle);
			}
		}
	} else {
		struct lttng_ust_lib_ring_buffer *buf =
			shmp(handle, chan->backend.buf[0].shmp);

		if (!buf)
			abort();
		if (uatomic_read(&buf->active_readers)
		    && lib_ring_buffer_poll_deliver(config, buf,
				chan, handle)) {
			lib_ring_buffer_wakeup(buf, handle);
		}
	}
	pthread_mutex_unlock(&wakeup_fd_mutex);
}

static
void lib_ring_buffer_channel_read_timer(int sig, siginfo_t *si, void *uc)
{
	struct channel *chan;

	assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
	chan = si->si_value.sival_ptr;
	DBG("Read timer for channel %p\n", chan);
	lib_ring_buffer_channel_do_read(chan);
	return;
}

static
void rb_setmask(sigset_t *mask)
{
	int ret;

	ret = sigemptyset(mask);
	if (ret) {
		PERROR("sigemptyset");
	}
	ret = sigaddset(mask, LTTNG_UST_RB_SIG_FLUSH);
	if (ret) {
		PERROR("sigaddset");
	}
	ret = sigaddset(mask, LTTNG_UST_RB_SIG_READ);
	if (ret) {
		PERROR("sigaddset");
	}
	ret = sigaddset(mask, LTTNG_UST_RB_SIG_TEARDOWN);
	if (ret) {
		PERROR("sigaddset");
	}
}

static
void *sig_thread(void *arg)
{
	sigset_t mask;
	siginfo_t info;
	int signr;

	/* Only self thread will receive signal mask. */
	rb_setmask(&mask);
	CMM_STORE_SHARED(timer_signal.tid, pthread_self());

	for (;;) {
		signr = sigwaitinfo(&mask, &info);
		if (signr == -1) {
			if (errno != EINTR)
				PERROR("sigwaitinfo");
			continue;
		}
		if (signr == LTTNG_UST_RB_SIG_FLUSH) {
			lib_ring_buffer_channel_switch_timer(info.si_signo,
					&info, NULL);
		} else if (signr == LTTNG_UST_RB_SIG_READ) {
			lib_ring_buffer_channel_read_timer(info.si_signo,
					&info, NULL);
		} else if (signr == LTTNG_UST_RB_SIG_TEARDOWN) {
			cmm_smp_mb();
			CMM_STORE_SHARED(timer_signal.qs_done, 1);
			cmm_smp_mb();
		} else {
			ERR("Unexpected signal %d\n", info.si_signo);
		}
	}
	return NULL;
}

/*
 * Ensure only a single thread listens on the timer signal.
 */
static
void lib_ring_buffer_setup_timer_thread(void)
{
	pthread_t thread;
	int ret;

	pthread_mutex_lock(&timer_signal.lock);
	if (timer_signal.setup_done)
		goto end;

	ret = pthread_create(&thread, NULL, &sig_thread, NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create");
	}
	ret = pthread_detach(thread);
	if (ret) {
		errno = ret;
		PERROR("pthread_detach");
	}
	timer_signal.setup_done = 1;
end:
	pthread_mutex_unlock(&timer_signal.lock);
}

/*
 * Wait for signal-handling thread quiescent state.
 */
static
void lib_ring_buffer_wait_signal_thread_qs(unsigned int signr)
{
	sigset_t pending_set;
	int ret;

	/*
	 * We need to be the only thread interacting with the thread
	 * that manages signals for teardown synchronization.
	 */
	pthread_mutex_lock(&timer_signal.lock);

	/*
	 * Ensure we don't have any signal queued for this channel.
	 */
	for (;;) {
		ret = sigemptyset(&pending_set);
		if (ret == -1) {
			PERROR("sigemptyset");
		}
		ret = sigpending(&pending_set);
		if (ret == -1) {
			PERROR("sigpending");
		}
		if (!sigismember(&pending_set, signr))
			break;
		caa_cpu_relax();
	}

	/*
	 * From this point, no new signal handler will be fired that
	 * would try to access "chan". However, we still need to wait
	 * for any currently executing handler to complete.
	 */
	cmm_smp_mb();
	CMM_STORE_SHARED(timer_signal.qs_done, 0);
	cmm_smp_mb();

	/*
	 * Kill with LTTNG_UST_RB_SIG_TEARDOWN, so signal management
	 * thread wakes up.
	 */
	kill(getpid(), LTTNG_UST_RB_SIG_TEARDOWN);

	while (!CMM_LOAD_SHARED(timer_signal.qs_done))
		caa_cpu_relax();
	cmm_smp_mb();

	pthread_mutex_unlock(&timer_signal.lock);
}

static
void lib_ring_buffer_channel_switch_timer_start(struct channel *chan)
{
	struct sigevent sev;
	struct itimerspec its;
	int ret;

	if (!chan->switch_timer_interval || chan->switch_timer_enabled)
		return;

	chan->switch_timer_enabled = 1;

	lib_ring_buffer_setup_timer_thread();

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = LTTNG_UST_RB_SIG_FLUSH;
	sev.sigev_value.sival_ptr = chan;
	ret = timer_create(CLOCKID, &sev, &chan->switch_timer);
	if (ret == -1) {
		PERROR("timer_create");
	}

	its.it_value.tv_sec = chan->switch_timer_interval / 1000000;
	its.it_value.tv_nsec = (chan->switch_timer_interval % 1000000) * 1000;
	its.it_interval.tv_sec = its.it_value.tv_sec;
	its.it_interval.tv_nsec = its.it_value.tv_nsec;

	ret = timer_settime(chan->switch_timer, 0, &its, NULL);
	if (ret == -1) {
		PERROR("timer_settime");
	}
}
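
/*
 * Worked example of the interval conversion above (illustrative value only):
 * a switch_timer_interval of 1500000 us gives
 * it_value.tv_sec = 1500000 / 1000000 = 1 and
 * it_value.tv_nsec = (1500000 % 1000000) * 1000 = 500000000,
 * i.e. a periodic flush every 1.5 s.
 */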

static
void lib_ring_buffer_channel_switch_timer_stop(struct channel *chan)
{
	int ret;

	if (!chan->switch_timer_interval || !chan->switch_timer_enabled)
		return;

	ret = timer_delete(chan->switch_timer);
	if (ret == -1) {
		PERROR("timer_delete");
	}

	lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_FLUSH);

	chan->switch_timer = 0;
	chan->switch_timer_enabled = 0;
}

static
void lib_ring_buffer_channel_read_timer_start(struct channel *chan)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	struct sigevent sev;
	struct itimerspec its;
	int ret;

	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
			|| !chan->read_timer_interval || chan->read_timer_enabled)
		return;

	chan->read_timer_enabled = 1;

	lib_ring_buffer_setup_timer_thread();

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = LTTNG_UST_RB_SIG_READ;
	sev.sigev_value.sival_ptr = chan;
	ret = timer_create(CLOCKID, &sev, &chan->read_timer);
	if (ret == -1) {
		PERROR("timer_create");
	}

	its.it_value.tv_sec = chan->read_timer_interval / 1000000;
	its.it_value.tv_nsec = (chan->read_timer_interval % 1000000) * 1000;
	its.it_interval.tv_sec = its.it_value.tv_sec;
	its.it_interval.tv_nsec = its.it_value.tv_nsec;

	ret = timer_settime(chan->read_timer, 0, &its, NULL);
	if (ret == -1) {
		PERROR("timer_settime");
	}
}

static
void lib_ring_buffer_channel_read_timer_stop(struct channel *chan)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	int ret;

	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
			|| !chan->read_timer_interval || !chan->read_timer_enabled)
		return;

	ret = timer_delete(chan->read_timer);
	if (ret == -1) {
		PERROR("timer_delete");
	}

	/*
	 * do one more check to catch data that has been written in the last
	 * timer period.
	 */
	lib_ring_buffer_channel_do_read(chan);

	lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_READ);

	chan->read_timer = 0;
	chan->read_timer_enabled = 0;
}

static void channel_unregister_notifiers(struct channel *chan,
			   struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_channel_switch_timer_stop(chan);
	lib_ring_buffer_channel_read_timer_stop(chan);
}

static void channel_print_errors(struct channel *chan,
		struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config =
			&chan->backend.config;
	int cpu;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(cpu) {
			struct lttng_ust_lib_ring_buffer *buf =
				shmp(handle, chan->backend.buf[cpu].shmp);
			lib_ring_buffer_print_errors(chan, buf, cpu, handle);
		}
	} else {
		struct lttng_ust_lib_ring_buffer *buf =
			shmp(handle, chan->backend.buf[0].shmp);

		lib_ring_buffer_print_errors(chan, buf, -1, handle);
	}
}

static void channel_free(struct channel *chan,
		struct lttng_ust_shm_handle *handle)
{
	channel_backend_free(&chan->backend, handle);
	/* chan is freed by shm teardown */
	shm_object_table_destroy(handle->table);
	free(handle);
}

/**
 * channel_create - Create channel.
 * @config: ring buffer instance configuration
 * @name: name of the channel
 * @priv_data: ring buffer client private data area pointer (output)
 * @priv_data_size: length, in bytes, of the private data area.
 * @priv_data_init: initialization data for private data.
 * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
 *            address mapping. It is used only by RING_BUFFER_STATIC
 *            configuration. It can be set to NULL for other backends.
 * @subbuf_size: subbuffer size
 * @num_subbuf: number of subbuffers
 * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
 *                         padding to let readers get those sub-buffers.
 *                         Used for live streaming.
 * @read_timer_interval: Time interval (in us) to wake up pending readers.
 * @stream_fds: array of stream file descriptors.
 * @nr_stream_fds: number of file descriptors in array.
 *
 * Holds cpu hotplug.
 * Returns NULL on failure.
 */
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
		const char *name,
		void **priv_data,
		size_t priv_data_align,
		size_t priv_data_size,
		void *priv_data_init,
		void *buf_addr, size_t subbuf_size,
		size_t num_subbuf, unsigned int switch_timer_interval,
		unsigned int read_timer_interval,
		const int *stream_fds, int nr_stream_fds)
{
	int ret;
	size_t shmsize, chansize;
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;
	struct shm_object *shmobj;
	unsigned int nr_streams;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		nr_streams = num_possible_cpus();
	else
		nr_streams = 1;

	if (nr_stream_fds != nr_streams)
		return NULL;

	if (lib_ring_buffer_check_config(config, switch_timer_interval,
			read_timer_interval))
		return NULL;

	handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
	if (!handle)
		return NULL;

	/* Allocate table for channel + per-cpu buffers */
	handle->table = shm_object_table_create(1 + num_possible_cpus());
	if (!handle->table)
		goto error_table_alloc;

	/* Calculate the shm allocation layout */
	shmsize = sizeof(struct channel);
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams;
	chansize = shmsize;
	if (priv_data_align)
		shmsize += offset_align(shmsize, priv_data_align);
	shmsize += priv_data_size;

	/* Allocate normal memory for channel (not shared) */
	shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
			-1);
	if (!shmobj)
		goto error_append;
	/* struct channel is at object 0, offset 0 (hardcoded) */
	set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
	assert(handle->chan._ref.index == 0);
	assert(handle->chan._ref.offset == 0);
	chan = shmp(handle, handle->chan);
	if (!chan)
		goto error_append;
	chan->nr_streams = nr_streams;

	/* space for private data */
	if (priv_data_size) {
		DECLARE_SHMP(void, priv_data_alloc);

		align_shm(shmobj, priv_data_align);
		chan->priv_data_offset = shmobj->allocated_len;
		set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
		if (!shmp(handle, priv_data_alloc))
			goto error_append;
		*priv_data = channel_get_private(chan);
		memcpy(*priv_data, priv_data_init, priv_data_size);
	} else {
		chan->priv_data_offset = -1;
		if (priv_data)
			*priv_data = NULL;
	}

	ret = channel_backend_init(&chan->backend, name, config,
				   subbuf_size, num_subbuf, handle,
				   stream_fds);
	if (ret)
		goto error_backend_init;

	chan->handle = handle;
	chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);

	chan->switch_timer_interval = switch_timer_interval;
	chan->read_timer_interval = read_timer_interval;
	lib_ring_buffer_channel_switch_timer_start(chan);
	lib_ring_buffer_channel_read_timer_start(chan);

	return handle;

error_backend_init:
error_append:
	shm_object_table_destroy(handle->table);
error_table_alloc:
	free(handle);
	return NULL;
}

struct lttng_ust_shm_handle *channel_handle_create(void *data,
		uint64_t memory_map_size,
		int wakeup_fd)
{
	struct lttng_ust_shm_handle *handle;
	struct shm_object *object;

	handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
	if (!handle)
		return NULL;

	/* Allocate table for channel + per-cpu buffers */
	handle->table = shm_object_table_create(1 + num_possible_cpus());
	if (!handle->table)
		goto error_table_alloc;
	/* Add channel object */
	object = shm_object_table_append_mem(handle->table, data,
			memory_map_size, wakeup_fd);
	if (!object)
		goto error_table_object;
	/* struct channel is at object 0, offset 0 (hardcoded) */
	handle->chan._ref.index = 0;
	handle->chan._ref.offset = 0;
	return handle;

error_table_object:
	shm_object_table_destroy(handle->table);
error_table_alloc:
	free(handle);
	return NULL;
}

int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		uint64_t memory_map_size)
{
	struct shm_object *object;

	/* Add stream object */
	object = shm_object_table_append_shm(handle->table,
			shm_fd, wakeup_fd, stream_nr,
			memory_map_size);
	if (!object)
		return -EINVAL;
	return 0;
}

unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
{
	assert(handle->table);
	return handle->table->allocated_len - 1;
}

static
void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
	channel_free(chan, handle);
}

/**
 * channel_destroy - Finalize, wait for q.s. and destroy channel.
 * @chan: channel to destroy
 *
 * Holds cpu hotplug.
 * Call "destroy" callback, finalize channels, decrement the channel
 * reference count. Note that when readers have completed data
 * consumption of finalized channels, get_subbuf() will return -ENODATA.
 * They should release their handle at that point.
 */
void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
		int consumer)
{
	if (consumer) {
		/*
		 * Note: the consumer takes care of finalizing and
		 * switching the buffers.
		 */
		channel_unregister_notifiers(chan, handle);
		/*
		 * The consumer prints errors.
		 */
		channel_print_errors(chan, handle);
	}

	/*
	 * sessiond/consumer are keeping a reference on the shm file
	 * descriptor directly. No need to refcount.
	 */
	channel_release(chan, handle);
	return;
}

struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		int *shm_fd, int *wait_fd,
		int *wakeup_fd,
		uint64_t *memory_map_size)
{
	struct shm_ref *ref;

	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
		cpu = 0;
	} else {
		if (cpu >= num_possible_cpus())
			return NULL;
	}
	ref = &chan->backend.buf[cpu].shmp._ref;
	*shm_fd = shm_get_shm_fd(handle, ref);
	*wait_fd = shm_get_wait_fd(handle, ref);
	*wakeup_fd = shm_get_wakeup_fd(handle, ref);
	if (shm_get_shm_size(handle, ref, memory_map_size))
		return NULL;
	return shmp(handle, chan->backend.buf[cpu].shmp);
}

int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle)
{
	struct shm_ref *ref;

	ref = &handle->chan._ref;
	return shm_close_wait_fd(handle, ref);
}

int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle)
{
	struct shm_ref *ref;

	ref = &handle->chan._ref;
	return shm_close_wakeup_fd(handle, ref);
}

int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu)
{
	struct shm_ref *ref;

	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
		cpu = 0;
	} else {
		if (cpu >= num_possible_cpus())
			return -EINVAL;
	}
	ref = &chan->backend.buf[cpu].shmp._ref;
	return shm_close_wait_fd(handle, ref);
}

int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu)
{
	struct shm_ref *ref;
	int ret;

	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
		cpu = 0;
	} else {
		if (cpu >= num_possible_cpus())
			return -EINVAL;
	}
	ref = &chan->backend.buf[cpu].shmp._ref;
	pthread_mutex_lock(&wakeup_fd_mutex);
	ret = shm_close_wakeup_fd(handle, ref);
	pthread_mutex_unlock(&wakeup_fd_mutex);
	return ret;
}

int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
			      struct lttng_ust_shm_handle *handle)
{
	if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
		return -EBUSY;
	cmm_smp_mb();
	return 0;
}

void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);

	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
	cmm_smp_mb();
	uatomic_dec(&buf->active_readers);
}

/**
 * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
 * @buf: ring buffer
 * @consumed: consumed count indicating the position where to read
 * @produced: produced count, indicates position when to stop reading
 *
 * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
 * data to read at consumed position, or 0 if the get operation succeeds.
 */

int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
			     unsigned long *consumed, unsigned long *produced,
			     struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long consumed_cur, write_offset;
	int finalized;

	finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	consumed_cur = uatomic_read(&buf->consumed);
	/*
	 * No need to issue a memory barrier between consumed count read and
	 * write offset read, because consumed count can only change
	 * concurrently in overwrite mode, and we keep a sequence counter
	 * identifier derived from the write offset to check we are getting
	 * the same sub-buffer we are expecting (the sub-buffers are atomically
	 * "tagged" upon writes, tags are checked upon read).
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
	    == 0)
		goto nodata;

	*consumed = consumed_cur;
	*produced = subbuf_trunc(write_offset, chan);

	return 0;

nodata:
	/*
	 * The memory barriers __wait_event()/wake_up_interruptible() take care
	 * of "raw_spin_is_locked" memory ordering.
	 */
	if (finalized)
		return -ENODATA;
	else
		return -EAGAIN;
}
|
|
Packit |
c04fcb |
|
|
Packit |
c04fcb |
/**
 * lib_ring_buffer_move_consumer - move consumed counter forward
 * @buf: ring buffer
 * @consumed_new: new consumed count value
 */
void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
				   unsigned long consumed_new,
				   struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
	struct channel *chan = shmp(handle, bufb->chan);
	unsigned long consumed;

	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);

	/*
	 * Only push the consumed value forward.
	 * If the consumed cmpxchg fails, this is because we have been pushed by
	 * the writer in flight recorder mode.
	 */
	consumed = uatomic_read(&buf->consumed);
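	/*
	 * The signed difference below is the usual wrap-around-safe comparison
	 * of free-running counters: the loop only ever moves the consumed
	 * counter forward, even across an unsigned overflow.
	 */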
	while ((long) consumed - (long) consumed_new < 0)
		consumed = uatomic_cmpxchg(&buf->consumed, consumed,
					   consumed_new);
}

/**
 * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
 * @buf: ring buffer
 * @consumed: consumed count indicating the position where to read
 *
 * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
 * data to read at consumed position, or 0 if the get operation succeeds.
 */
int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
			       unsigned long consumed,
			       struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
	int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;

retry:
	finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	consumed_cur = uatomic_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed, chan);
	commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * with respect to the commit count is ensured by the use of
	 * cmpxchg to update the consumed offset.
	 */
	/*
	 * Local rmb to match the remote wmb to read the commit count
	 * before the buffer data and the write offset.
	 */
	cmm_smp_rmb();

	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the buffer we are getting is after or at consumed_cur
	 * position.
	 */
	if ((long) subbuf_trunc(consumed, chan)
	    - (long) subbuf_trunc(consumed_cur, chan) < 0)
		goto nodata;

	/*
	 * Check that the subbuffer we are trying to consume has already been
	 * fully committed. There are a few causes that can make this
	 * unavailability situation occur:
	 *
	 * Temporary (short-term) situation:
	 * - Application is running on a different CPU, between reserve
	 *   and commit ring buffer operations,
	 * - Application is preempted between reserve and commit ring
	 *   buffer operations,
	 *
	 * Long-term situation:
	 * - Application is stopped (SIGSTOP) between reserve and commit
	 *   ring buffer operations. Could eventually be resumed by
	 *   SIGCONT.
	 * - Application is killed (SIGTERM, SIGINT, SIGKILL) between
	 *   reserve and commit ring buffer operation.
	 *
	 * From a consumer perspective, short-term unavailability
	 * situations are handled by retrying a few times after a delay.
	 * Long-term unavailability situations are handled by failing to
	 * get the sub-buffer.
	 *
	 * In all of those situations, if the application is taking a
	 * long time to perform its commit after ring buffer space
	 * reservation, we can end up in a situation where the producer
	 * will fill the ring buffer and try to write into the same
	 * sub-buffer again (which has a missing commit). This is
	 * handled by the producer in the sub-buffer switch handling
	 * code of the reserve routine by detecting unbalanced
	 * reserve/commit counters and discarding all further events
	 * until the situation is resolved. Two scenarios can occur:
	 *
	 * 1) The application causing the reserve/commit counters to be
	 *    unbalanced has been terminated. In this situation, all
	 *    further events will be discarded in the buffers, and no
	 *    further buffer data will be readable by the consumer
	 *    daemon. Tearing down the UST tracing session and starting
	 *    anew is a work-around for those situations. Note that this
	 *    only affects per-UID tracing. In per-PID tracing, the
	 *    application vanishes with the termination, and therefore
	 *    no more data needs to be written to the buffers.
	 * 2) The application causing the unbalance has been delayed for
	 *    a long time, but will eventually try to increment the
	 *    commit counter after eventually writing to the sub-buffer.
	 *    This situation can cause events to be discarded until the
	 *    application resumes its operations.
	 */
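	/*
	 * Retry policy for the two checks below: the first half of the
	 * LTTNG_UST_RING_BUFFER_GET_RETRY attempts retry immediately, the
	 * second half sleep LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS between
	 * attempts (via poll()), so short-lived races are absorbed cheaply
	 * while long-lived ones eventually take the nodata path.
	 */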
	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0) {
		if (nr_retry-- > 0) {
			if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
				(void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
			goto retry;
		} else {
			goto nodata;
		}
	}

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
	    == 0)
		goto nodata;

	/*
	 * Failure to get the subbuffer causes a busy-loop retry without going
	 * to a wait queue. These are caused by short-lived race windows where
	 * the writer is getting access to a subbuffer we were trying to get
	 * access to. Also checks that the "consumed" buffer count we are
	 * looking for matches the one contained in the subbuffer id.
	 *
	 * The short-lived race window described here can be affected by
	 * application signals and preemption, thus requiring the loop to be
	 * bounded to a maximum number of retries.
	 */
	ret = update_read_sb_index(config, &buf->backend, &chan->backend,
			consumed_idx, buf_trunc_val(consumed, chan),
			handle);
	if (ret) {
		if (nr_retry-- > 0) {
			if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
				(void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
			goto retry;
		} else {
			goto nodata;
		}
	}
	subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);

	buf->get_subbuf_consumed = consumed;
	buf->get_subbuf = 1;

	return 0;

nodata:
	/*
	 * The memory barriers __wait_event()/wake_up_interruptible() take care
	 * of "raw_spin_is_locked" memory ordering.
	 */
	if (finalized)
		return -ENODATA;
	else
		return -EAGAIN;
}

/**
 * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
 * @buf: ring buffer
 */
void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
	struct channel *chan = shmp(handle, bufb->chan);
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long read_sb_bindex, consumed_idx, consumed;

	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);

	if (!buf->get_subbuf) {
		/*
		 * Reader puts a subbuffer it did not get.
		 */
		CHAN_WARN_ON(chan, 1);
		return;
	}
	consumed = buf->get_subbuf_consumed;
	buf->get_subbuf = 0;

	/*
	 * Clear the records_unread counter. (overruns counter)
	 * Can still be non-zero if a file reader simply grabbed the data
	 * without using iterators.
	 * Can be below zero if an iterator is used on a snapshot more than
	 * once.
	 */
	read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	v_add(config, v_read(config,
			&shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
		&bufb->records_read);
	v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
	CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
	subbuffer_id_set_noref(config, &bufb->buf_rsb.id);

	/*
	 * Exchange the reader subbuffer with the one we put in its place in the
	 * writer subbuffer table. Expect the original consumed count. If
	 * update_read_sb_index fails, this is because the writer updated the
	 * subbuffer concurrently. We should therefore keep the subbuffer we
	 * currently have: it has become invalid to try reading this sub-buffer
	 * consumed count value anyway.
	 */
	consumed_idx = subbuf_index(consumed, chan);
	update_read_sb_index(config, &buf->backend, &chan->backend,
			consumed_idx, buf_trunc_val(consumed, chan),
			handle);
	/*
	 * update_read_sb_index return value ignored. Don't exchange sub-buffer
	 * if the writer concurrently updated it.
	 */
}

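/*
 * For illustration only, a rough sketch of how a consumer could drive the
 * entry points above for a bounded read. read_subbuf_data() stands in for
 * whatever mechanism (mmap or splice) actually extracts the reader
 * sub-buffer contents; it is hypothetical and not part of this file:
 *
 *	struct channel *chan = shmp(handle, buf->backend.chan);
 *	unsigned long consumed, produced;
 *
 *	if (lib_ring_buffer_snapshot(buf, &consumed, &produced, handle))
 *		return;
 *	while ((long) (produced - consumed) > 0) {
 *		if (!lib_ring_buffer_get_subbuf(buf, consumed, handle)) {
 *			read_subbuf_data(buf, handle);
 *			lib_ring_buffer_put_subbuf(buf, handle);
 *		}
 *		consumed += chan->backend.subbuf_size;
 *		lib_ring_buffer_move_consumer(buf, consumed, handle);
 *	}
 */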
/*
 * cons_offset is an iterator on all subbuffer offsets between the reader
 * position and the writer position. (inclusive)
 */
static
void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
					    struct channel *chan,
					    unsigned long cons_offset,
					    int cpu,
					    struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long cons_idx, commit_count, commit_count_sb;

	cons_idx = subbuf_index(cons_offset, chan);
	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
	commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);

	if (subbuf_offset(commit_count, chan) != 0)
		DBG("ring buffer %s, cpu %d: "
		    "commit count in subbuffer %lu,\n"
		    "expecting multiples of %lu bytes\n"
		    "  [ %lu bytes committed, %lu bytes reader-visible ]\n",
		    chan->backend.name, cpu, cons_idx,
		    chan->backend.subbuf_size,
		    commit_count, commit_count_sb);

	DBG("ring buffer: %s, cpu %d: %lu bytes committed\n",
	    chan->backend.name, cpu, commit_count);
}

static
void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
					 struct channel *chan,
					 void *priv, int cpu,
					 struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long write_offset, cons_offset;

	/*
	 * No need to order commit_count, write_offset and cons_offset reads
	 * because we execute at teardown when no more writer nor reader
	 * references are left.
	 */
	write_offset = v_read(config, &buf->offset);
	cons_offset = uatomic_read(&buf->consumed);
	if (write_offset != cons_offset)
		DBG("ring buffer %s, cpu %d: "
		    "non-consumed data\n"
		    "  [ %lu bytes written, %lu bytes read ]\n",
		    chan->backend.name, cpu, write_offset, cons_offset);

	for (cons_offset = uatomic_read(&buf->consumed);
	     (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
				  chan)
		     - cons_offset) > 0;
	     cons_offset = subbuf_align(cons_offset, chan))
		lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
						       cpu, handle);
}

static
void lib_ring_buffer_print_errors(struct channel *chan,
				  struct lttng_ust_lib_ring_buffer *buf, int cpu,
				  struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	void *priv = channel_get_private(chan);

	if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
		DBG("ring buffer %s: %lu records written, "
			"%lu records overrun\n",
			chan->backend.name,
			v_read(config, &buf->records_count),
			v_read(config, &buf->records_overrun));
	} else {
		DBG("ring buffer %s, cpu %d: %lu records written, "
			"%lu records overrun\n",
			chan->backend.name, cpu,
			v_read(config, &buf->records_count),
			v_read(config, &buf->records_overrun));

		if (v_read(config, &buf->records_lost_full)
		    || v_read(config, &buf->records_lost_wrap)
		    || v_read(config, &buf->records_lost_big))
			DBG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
				"  [ %lu buffer full, %lu nest buffer wrap-around, "
				"%lu event too big ]\n",
				chan->backend.name, cpu,
				v_read(config, &buf->records_lost_full),
				v_read(config, &buf->records_lost_wrap),
				v_read(config, &buf->records_lost_big));
	}
	lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu, handle);
}

/*
 * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
 *
 * Only executed by SWITCH_FLUSH, which can be issued while tracing is
 * active or at buffer finalization (destroy).
 */
static
void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct switch_offsets *offsets,
				      uint64_t tsc,
				      struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long oldidx = subbuf_index(offsets->old, chan);
	unsigned long commit_count;

	config->cb.buffer_begin(buf, tsc, oldidx, handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();
	v_add(config, config->cb.subbuffer_header_size(),
	      &shmp_index(handle, buf->commit_hot, oldidx)->cc);
	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
	/* Check if the written buffer has to be delivered */
	lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
				      commit_count, oldidx, handle, tsc);
	lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
			offsets->old + config->cb.subbuffer_header_size(),
			commit_count, handle);
}

/*
 * lib_ring_buffer_switch_old_end: switch old subbuffer
 *
 * Note : offset_old should never be 0 here. It is ok, because we never perform
 * buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The caller
 * increments the offset_old value when doing a SWITCH_FLUSH on an empty
 * subbuffer.
 */
static
void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
				    struct channel *chan,
				    struct switch_offsets *offsets,
				    uint64_t tsc,
				    struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
	unsigned long commit_count, padding_size, data_size;

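	/*
	 * offsets->old is the reserve offset one past the last byte reserved
	 * in the old sub-buffer, so "offsets->old - 1" addresses its last used
	 * byte: its offset within the sub-buffer plus one is the payload size,
	 * and the remainder of the sub-buffer is accounted as padding.
	 */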
	data_size = subbuf_offset(offsets->old - 1, chan) + 1;
	padding_size = chan->backend.subbuf_size - data_size;
	subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
				handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();
	v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
	lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
				      commit_count, oldidx, handle, tsc);
	lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
			offsets->old + padding_size, commit_count, handle);
}

/*
 * lib_ring_buffer_switch_new_start: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static
void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct switch_offsets *offsets,
				      uint64_t tsc,
				      struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long beginidx = subbuf_index(offsets->begin, chan);
	unsigned long commit_count;

	config->cb.buffer_begin(buf, tsc, beginidx, handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();
	v_add(config, config->cb.subbuffer_header_size(),
	      &shmp_index(handle, buf->commit_hot, beginidx)->cc);
	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
	/* Check if the written buffer has to be delivered */
	lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
				      commit_count, beginidx, handle, tsc);
	lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
			offsets->begin + config->cb.subbuffer_header_size(),
			commit_count, handle);
}

/*
 * lib_ring_buffer_switch_new_end: finish switching current subbuffer
 *
 * Calls subbuffer_set_data_size() to set the data size of the current
 * sub-buffer. We do not need to perform check_deliver nor commit here,
 * since this task will be done by the "commit" of the event for which
 * we are currently doing the space reservation.
 */
static
void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
				    struct channel *chan,
				    struct switch_offsets *offsets,
				    uint64_t tsc,
				    struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long endidx, data_size;

	endidx = subbuf_index(offsets->end - 1, chan);
	data_size = subbuf_offset(offsets->end - 1, chan) + 1;
	subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
				handle);
}

/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
				    struct lttng_ust_lib_ring_buffer *buf,
				    struct channel *chan,
				    struct switch_offsets *offsets,
				    uint64_t *tsc,
				    struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long off, reserve_commit_diff;

	offsets->begin = v_read(config, &buf->offset);
	offsets->old = offsets->begin;
	offsets->switch_old_start = 0;
	off = subbuf_offset(offsets->begin, chan);

	*tsc = config->cb.ring_buffer_clock_read(chan);

	/*
	 * Ensure we flush the header of an empty subbuffer when doing the
	 * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
	 * total data gathering duration even if there were no records saved
	 * after the last buffer switch.
	 * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
	 * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
	 * subbuffer header as appropriate.
	 * The next record that reserves space will be responsible for
	 * populating the following subbuffer header. We choose not to populate
	 * the next subbuffer header here because we want to be able to use
	 * SWITCH_ACTIVE for periodical buffer flush, which must
	 * guarantee that all the buffer content (records and header
	 * timestamps) are visible to the reader. This is required for
	 * quiescence guarantees for the fusion merge.
	 */
	if (mode != SWITCH_FLUSH && !off)
		return -1;	/* we do not have to switch : buffer is empty */

	if (caa_unlikely(off == 0)) {
		unsigned long sb_index, commit_count;

		/*
		 * We are performing a SWITCH_FLUSH. There may be concurrent
		 * writes into the buffer if e.g. invoked while performing a
		 * snapshot on an active trace.
		 *
		 * If the client does not save any header information
		 * (sub-buffer header size == 0), don't switch empty subbuffer
		 * on finalize, because it is invalid to deliver a completely
		 * empty subbuffer.
		 */
		if (!config->cb.subbuffer_header_size())
			return -1;

		/* Test new buffer integrity */
		sb_index = subbuf_index(offsets->begin, chan);
		commit_count = v_read(config,
				&shmp_index(handle, buf->commit_cold,
					sb_index)->cc_sb);
		reserve_commit_diff =
		  (buf_trunc(offsets->begin, chan)
		   >> chan->backend.num_subbuf_order)
		  - (commit_count & chan->commit_count_mask);
		if (caa_likely(reserve_commit_diff == 0)) {
			/* Next subbuffer not being written to. */
			if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
				subbuf_trunc(offsets->begin, chan)
				 - subbuf_trunc((unsigned long)
				     uatomic_read(&buf->consumed), chan)
				>= chan->backend.buf_size)) {
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : don't switch.
				 */
				return -1;
			} else {
				/*
				 * Next subbuffer not being written to, and we
				 * are either in overwrite mode or the buffer is
				 * not full. It's safe to write in this new
				 * subbuffer.
				 */
			}
		} else {
			/*
			 * Next subbuffer reserve offset does not match the
			 * commit offset. Don't perform switch in
			 * producer-consumer and overwrite mode. Caused by
			 * either a writer OOPS or too many nested writes over a
			 * reserve/commit pair.
			 */
			return -1;
		}

		/*
		 * Need to write the subbuffer start header on finalize.
		 */
		offsets->switch_old_start = 1;
	}
	offsets->begin = subbuf_align(offsets->begin, chan);
	/* Note: old points to the next subbuf at offset 0 */
	offsets->end = offsets->begin;
	return 0;
}

/*
 * Force a sub-buffer switch. This operation is completely reentrant : can be
 * called while tracing is active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic
 * operations, this function must be called from the CPU which owns the buffer
 * for an ACTIVE flush.
 */
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	struct switch_offsets offsets;
	unsigned long oldidx;
	uint64_t tsc;

	offsets.size = 0;

	/*
	 * Perform retryable operations.
	 */
	do {
		if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
						    &tsc, handle))
			return;	/* Switch not needed */
	} while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
		 != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * records, never the opposite (missing a full TSC record when it would
	 * be needed).
	 */
	save_last_tsc(config, buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);

	oldidx = subbuf_index(offsets.old, chan);
	lib_ring_buffer_clear_noref(config, &buf->backend, oldidx, handle);

	/*
	 * May need to populate header start on SWITCH_FLUSH.
	 */
	if (offsets.switch_old_start) {
		lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
		offsets.old += config->cb.subbuffer_header_size();
	}

	/*
	 * Switch old subbuffer.
	 */
	lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
}

/*
 * Returns :
 * 0 if ok
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */
static
int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
				     struct channel *chan,
				     struct switch_offsets *offsets,
				     struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	unsigned long reserve_commit_diff, offset_cmp;

retry:
	offsets->begin = offset_cmp = v_read(config, &buf->offset);
	offsets->old = offsets->begin;
	offsets->switch_new_start = 0;
	offsets->switch_new_end = 0;
	offsets->switch_old_end = 0;
	offsets->pre_header_padding = 0;

	ctx->tsc = config->cb.ring_buffer_clock_read(chan);
	if ((int64_t) ctx->tsc == -EIO)
		return -EIO;

	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
		offsets->switch_new_start = 1;		/* For offsets->begin */
	} else {
		offsets->size = config->cb.record_header_size(config, chan,
						offsets->begin,
						&offsets->pre_header_padding,
						ctx);
		offsets->size +=
			lib_ring_buffer_align(offsets->begin + offsets->size,
					      ctx->largest_align)
			+ ctx->data_size;
		if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
				 offsets->size > chan->backend.subbuf_size)) {
			offsets->switch_old_end = 1;	/* For offsets->old */
			offsets->switch_new_start = 1;	/* For offsets->begin */
		}
	}
	if (caa_unlikely(offsets->switch_new_start)) {
		unsigned long sb_index, commit_count;

		/*
		 * We are typically not filling the previous buffer completely.
		 */
		if (caa_likely(offsets->switch_old_end))
			offsets->begin = subbuf_align(offsets->begin, chan);
		offsets->begin = offsets->begin
				 + config->cb.subbuffer_header_size();
		/* Test new buffer integrity */
		sb_index = subbuf_index(offsets->begin, chan);
		/*
		 * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
		 * lib_ring_buffer_check_deliver() has the matching
		 * memory barriers required around commit_cold cc_sb
		 * updates to ensure reserve and commit counter updates
		 * are not seen reordered when updated by another CPU.
		 */
		cmm_smp_rmb();
		commit_count = v_read(config,
				&shmp_index(handle, buf->commit_cold,
					sb_index)->cc_sb);
		/* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
		cmm_smp_rmb();
		if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
			/*
			 * The reserve counter has been concurrently updated
			 * while we read the commit counter. This means the
			 * commit counter we read might not match buf->offset
			 * due to concurrent update. We therefore need to retry.
			 */
			goto retry;
		}
		reserve_commit_diff =
		  (buf_trunc(offsets->begin, chan)
		   >> chan->backend.num_subbuf_order)
		  - (commit_count & chan->commit_count_mask);
		if (caa_likely(reserve_commit_diff == 0)) {
			/* Next subbuffer not being written to. */
			if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
				subbuf_trunc(offsets->begin, chan)
				 - subbuf_trunc((unsigned long)
				     uatomic_read(&buf->consumed), chan)
				>= chan->backend.buf_size)) {
				unsigned long nr_lost;

				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : record is lost.
				 */
				nr_lost = v_read(config, &buf->records_lost_full);
				v_inc(config, &buf->records_lost_full);
				if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
					DBG("%lu or more records lost in (%s:%d) (buffer full)\n",
						nr_lost + 1, chan->backend.name,
						buf->backend.cpu);
				}
				return -ENOBUFS;
			} else {
				/*
				 * Next subbuffer not being written to, and we
				 * are either in overwrite mode or the buffer is
				 * not full. It's safe to write in this new
				 * subbuffer.
				 */
			}
		} else {
			unsigned long nr_lost;

			/*
			 * Next subbuffer reserve offset does not match the
			 * commit offset, and this did not involve update to the
			 * reserve counter. Drop record in producer-consumer and
			 * overwrite mode. Caused by either a writer OOPS or too
			 * many nested writes over a reserve/commit pair.
			 */
			nr_lost = v_read(config, &buf->records_lost_wrap);
			v_inc(config, &buf->records_lost_wrap);
			if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
				DBG("%lu or more records lost in (%s:%d) (wrap-around)\n",
					nr_lost + 1, chan->backend.name,
					buf->backend.cpu);
			}
			return -EIO;
		}
		offsets->size =
			config->cb.record_header_size(config, chan,
						offsets->begin,
						&offsets->pre_header_padding,
						ctx);
		offsets->size +=
			lib_ring_buffer_align(offsets->begin + offsets->size,
					      ctx->largest_align)
			+ ctx->data_size;
		if (caa_unlikely(subbuf_offset(offsets->begin, chan)
				 + offsets->size > chan->backend.subbuf_size)) {
			unsigned long nr_lost;

			/*
			 * Record too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			nr_lost = v_read(config, &buf->records_lost_big);
			v_inc(config, &buf->records_lost_big);
			if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
				DBG("%lu or more records lost in (%s:%d) record size "
					" of %zu bytes is too large for buffer\n",
					nr_lost + 1, chan->backend.name,
					buf->backend.cpu, offsets->size);
			}
			return -ENOSPC;
		} else {
			/*
			 * We just made a successful buffer switch and the
			 * record fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Record fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->switch_new_end = 1;	/* For offsets->begin */
	}
	return 0;
}

/**
 * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
 * @ctx: ring buffer context.
 *
 * Return : -ENOBUFS if not enough space, -ENOSPC if event size too large,
 * -EIO for other errors, else returns 0.
 * It will take care of sub-buffer switching.
 */
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
	struct lttng_ust_lib_ring_buffer *buf;
	struct switch_offsets offsets;
	int ret;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
	else
		buf = shmp(handle, chan->backend.buf[0].shmp);
	ctx->buf = buf;

	offsets.size = 0;

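	/*
	 * Publish the reservation with a cmpxchg on buf->offset: if another
	 * writer (or a concurrent buffer switch) moved the offset while the
	 * slot size was being computed, the whole computation is redone
	 * against the new offset rather than committing a stale reservation.
	 */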
	do {
		ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
						       ctx);
		if (caa_unlikely(ret))
			return ret;
	} while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
					offsets.end)
			      != offsets.old));

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * records, never the opposite (missing a full TSC record when it would
	 * be needed).
	 */
	save_last_tsc(config, buf, ctx->tsc);

	/*
	 * Push the reader if necessary
	 */
	lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &buf->backend,
				    subbuf_index(offsets.end - 1, chan),
				    handle);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (caa_unlikely(offsets.switch_old_end)) {
		lib_ring_buffer_clear_noref(config, &buf->backend,
					    subbuf_index(offsets.old - 1, chan),
					    handle);
		lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc, handle);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (caa_unlikely(offsets.switch_new_start))
		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);

	if (caa_unlikely(offsets.switch_new_end))
		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);

	ctx->slot_size = offsets.size;
	ctx->pre_offset = offsets.begin;
	ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
	return 0;
}

/*
 * Force a read (implies TLS fixup for dlopen) of TLS variables.
 */
void lttng_fixup_ringbuffer_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
}

void lib_ringbuffer_signal_init(void)
{
	sigset_t mask;
	int ret;

	/*
	 * Block signal for entire process, so only our thread processes
	 * it.
	 */
	rb_setmask(&mask);
	ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_sigmask");
	}
}