Commit d3476f7d authored by Sasha Levin's avatar Sasha Levin Committed by Will Deacon
Browse files

kvm tools: use mutex abstraction instead of pthread mutex



We already have something to wrap pthread with mutex_[init,lock,unlock]
calls. This patch creates a new struct mutex abstraction and moves
everything to work with it.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent a4d8c55e
......@@ -22,7 +22,7 @@
#define UART_IIR_TYPE_BITS 0xc0
struct serial8250_device {
pthread_mutex_t mutex;
struct mutex mutex;
u8 id;
u16 iobase;
......@@ -55,7 +55,7 @@ struct serial8250_device {
static struct serial8250_device devices[] = {
/* ttyS0 */
[0] = {
.mutex = PTHREAD_MUTEX_INITIALIZER,
.mutex = MUTEX_INITIALIZER,
.id = 0,
.iobase = 0x3f8,
......@@ -65,7 +65,7 @@ static struct serial8250_device devices[] = {
},
/* ttyS1 */
[1] = {
.mutex = PTHREAD_MUTEX_INITIALIZER,
.mutex = MUTEX_INITIALIZER,
.id = 1,
.iobase = 0x2f8,
......@@ -75,7 +75,7 @@ static struct serial8250_device devices[] = {
},
/* ttyS2 */
[2] = {
.mutex = PTHREAD_MUTEX_INITIALIZER,
.mutex = MUTEX_INITIALIZER,
.id = 2,
.iobase = 0x3e8,
......@@ -85,7 +85,7 @@ static struct serial8250_device devices[] = {
},
/* ttyS3 */
[3] = {
.mutex = PTHREAD_MUTEX_INITIALIZER,
.mutex = MUTEX_INITIALIZER,
.id = 3,
.iobase = 0x2e8,
......
......@@ -10,23 +10,29 @@
* to write user-space code! :-)
*/
/*
 * Wrapper type around pthread_mutex_t so the rest of the tool deals in a
 * project-specific struct mutex rather than raw pthread primitives.
 */
struct mutex {
	pthread_mutex_t mutex;
};

/* Static initializer for a struct mutex, mirroring PTHREAD_MUTEX_INITIALIZER. */
#define MUTEX_INITIALIZER (struct mutex) { .mutex = PTHREAD_MUTEX_INITIALIZER }

/* Define and statically initialize a struct mutex in one step. */
#define DEFINE_MUTEX(mtx) struct mutex mtx = MUTEX_INITIALIZER
/*
 * Runtime initialization of a struct mutex with default attributes.
 * On failure the process is aborted via die(), so callers never see an
 * error return.
 */
static inline void mutex_init(struct mutex *lock)
{
	if (pthread_mutex_init(&lock->mutex, NULL) != 0)
		die("unexpected pthread_mutex_init() failure!");
}
static inline void mutex_lock(pthread_mutex_t *mutex)
static inline void mutex_lock(struct mutex *lock)
{
if (pthread_mutex_lock(mutex) != 0)
if (pthread_mutex_lock(&lock->mutex) != 0)
die("unexpected pthread_mutex_lock() failure!");
}
static inline void mutex_unlock(pthread_mutex_t *mutex)
static inline void mutex_unlock(struct mutex *lock)
{
if (pthread_mutex_unlock(mutex) != 0)
if (pthread_mutex_unlock(&lock->mutex) != 0)
die("unexpected pthread_mutex_unlock() failure!");
}
......
......@@ -74,7 +74,7 @@ struct qcow_header {
};
struct qcow {
pthread_mutex_t mutex;
struct mutex mutex;
struct qcow_header *header;
struct qcow_l1_table table;
struct qcow_refcount_table refcount_table;
......
......@@ -15,7 +15,7 @@ struct thread_pool__job {
void *data;
int signalcount;
pthread_mutex_t mutex;
struct mutex mutex;
struct list_head queue;
};
......@@ -26,7 +26,7 @@ static inline void thread_pool__init_job(struct thread_pool__job *job, struct kv
.kvm = kvm,
.callback = callback,
.data = data,
.mutex = PTHREAD_MUTEX_INITIALIZER,
.mutex = MUTEX_INITIALIZER,
};
}
......
......@@ -187,14 +187,14 @@ struct uip_dhcp {
struct uip_info {
struct list_head udp_socket_head;
struct list_head tcp_socket_head;
pthread_mutex_t udp_socket_lock;
pthread_mutex_t tcp_socket_lock;
struct mutex udp_socket_lock;
struct mutex tcp_socket_lock;
struct uip_eth_addr guest_mac;
struct uip_eth_addr host_mac;
pthread_cond_t buf_free_cond;
pthread_cond_t buf_used_cond;
struct list_head buf_head;
pthread_mutex_t buf_lock;
struct mutex buf_lock;
pthread_t udp_thread;
int udp_epollfd;
int buf_free_nr;
......@@ -221,7 +221,7 @@ struct uip_buf {
struct uip_udp_socket {
struct sockaddr_in addr;
struct list_head list;
pthread_mutex_t *lock;
struct mutex *lock;
u32 dport, sport;
u32 dip, sip;
int fd;
......@@ -232,7 +232,7 @@ struct uip_tcp_socket {
struct list_head list;
struct uip_info *info;
pthread_cond_t cond;
pthread_mutex_t *lock;
struct mutex *lock;
pthread_t thread;
u32 dport, sport;
u32 guest_acked;
......
......@@ -11,7 +11,7 @@ struct uip_buf *uip_buf_get_used(struct uip_info *info)
mutex_lock(&info->buf_lock);
while (!(info->buf_used_nr > 0))
pthread_cond_wait(&info->buf_used_cond, &info->buf_lock);
pthread_cond_wait(&info->buf_used_cond, &info->buf_lock.mutex);
list_for_each_entry(buf, &info->buf_head, list) {
if (buf->status == UIP_BUF_STATUS_USED) {
......@@ -39,7 +39,7 @@ struct uip_buf *uip_buf_get_free(struct uip_info *info)
mutex_lock(&info->buf_lock);
while (!(info->buf_free_nr > 0))
pthread_cond_wait(&info->buf_free_cond, &info->buf_lock);
pthread_cond_wait(&info->buf_free_cond, &info->buf_lock.mutex);
list_for_each_entry(buf, &info->buf_head, list) {
if (buf->status == UIP_BUF_STATUS_FREE) {
......
......@@ -153,9 +153,9 @@ int uip_init(struct uip_info *info)
INIT_LIST_HEAD(tcp_socket_head);
INIT_LIST_HEAD(buf_head);
pthread_mutex_init(&info->udp_socket_lock, NULL);
pthread_mutex_init(&info->tcp_socket_lock, NULL);
pthread_mutex_init(&info->buf_lock, NULL);
mutex_init(&info->udp_socket_lock);
mutex_init(&info->tcp_socket_lock);
mutex_init(&info->buf_lock);
pthread_cond_init(&info->buf_used_cond, NULL);
pthread_cond_init(&info->buf_free_cond, NULL);
......
......@@ -27,7 +27,7 @@ static int uip_tcp_socket_close(struct uip_tcp_socket *sk, int how)
static struct uip_tcp_socket *uip_tcp_socket_find(struct uip_tx_arg *arg, u32 sip, u32 dip, u16 sport, u16 dport)
{
struct list_head *sk_head;
pthread_mutex_t *sk_lock;
struct mutex *sk_lock;
struct uip_tcp_socket *sk;
sk_head = &arg->info->tcp_socket_head;
......@@ -49,7 +49,7 @@ static struct uip_tcp_socket *uip_tcp_socket_alloc(struct uip_tx_arg *arg, u32 s
{
struct list_head *sk_head;
struct uip_tcp_socket *sk;
pthread_mutex_t *sk_lock;
struct mutex *sk_lock;
struct uip_tcp *tcp;
struct uip_ip *ip;
int ret;
......@@ -198,7 +198,7 @@ static void *uip_tcp_socket_thread(void *p)
while (left > 0) {
mutex_lock(sk->lock);
while ((len = sk->guest_acked + sk->window_size - sk->seq_server) <= 0)
pthread_cond_wait(&sk->cond, sk->lock);
pthread_cond_wait(&sk->cond, &sk->lock->mutex);
mutex_unlock(sk->lock);
sk->payload = pos;
......
......@@ -14,7 +14,7 @@ static struct uip_udp_socket *uip_udp_socket_find(struct uip_tx_arg *arg, u32 si
{
struct list_head *sk_head;
struct uip_udp_socket *sk;
pthread_mutex_t *sk_lock;
struct mutex *sk_lock;
struct epoll_event ev;
int flags;
int ret;
......
......@@ -7,9 +7,9 @@
#include <pthread.h>
#include <stdbool.h>
static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t thread_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t job_cond = PTHREAD_COND_INITIALIZER;
static DEFINE_MUTEX(job_mutex);
static DEFINE_MUTEX(thread_mutex);
static pthread_cond_t job_cond = PTHREAD_COND_INITIALIZER;
static LIST_HEAD(head);
......@@ -85,7 +85,7 @@ static void *thread_pool__threadfunc(void *param)
mutex_lock(&job_mutex);
while (running && (curjob = thread_pool__job_pop_locked()) == NULL)
pthread_cond_wait(&job_cond, &job_mutex);
pthread_cond_wait(&job_cond, &job_mutex.mutex);
mutex_unlock(&job_mutex);
if (running)
......
......@@ -37,7 +37,7 @@ struct blk_dev_req {
};
struct blk_dev {
pthread_mutex_t mutex;
struct mutex mutex;
struct list_head list;
......@@ -248,7 +248,7 @@ static int virtio_blk__init_one(struct kvm *kvm, struct disk_image *disk)
return -ENOMEM;
*bdev = (struct blk_dev) {
.mutex = PTHREAD_MUTEX_INITIALIZER,
.mutex = MUTEX_INITIALIZER,
.disk = disk,
.blk_config = (struct virtio_blk_config) {
.capacity = disk->size / SECTOR_SIZE,
......
......@@ -29,7 +29,7 @@
#define VIRTIO_CONSOLE_TX_QUEUE 1
struct con_dev {
pthread_mutex_t mutex;
struct mutex mutex;
struct virtio_device vdev;
struct virt_queue vqs[VIRTIO_CONSOLE_NUM_QUEUES];
......@@ -40,7 +40,7 @@ struct con_dev {
};
static struct con_dev cdev = {
.mutex = PTHREAD_MUTEX_INITIALIZER,
.mutex = MUTEX_INITIALIZER,
.config = {
.cols = 80,
......
......@@ -39,7 +39,7 @@ struct net_dev_operations {
};
struct net_dev {
pthread_mutex_t mutex;
struct mutex mutex;
struct virtio_device vdev;
struct list_head list;
......@@ -48,11 +48,11 @@ struct net_dev {
u32 features;
pthread_t io_rx_thread;
pthread_mutex_t io_rx_lock;
struct mutex io_rx_lock;
pthread_cond_t io_rx_cond;
pthread_t io_tx_thread;
pthread_mutex_t io_tx_lock;
struct mutex io_tx_lock;
pthread_cond_t io_tx_cond;
int vhost_fd;
......@@ -87,7 +87,7 @@ static void *virtio_net_rx_thread(void *p)
while (1) {
mutex_lock(&ndev->io_rx_lock);
if (!virt_queue__available(vq))
pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock);
pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock.mutex);
mutex_unlock(&ndev->io_rx_lock);
while (virt_queue__available(vq)) {
......@@ -125,7 +125,7 @@ static void *virtio_net_tx_thread(void *p)
while (1) {
mutex_lock(&ndev->io_tx_lock);
if (!virt_queue__available(vq))
pthread_cond_wait(&ndev->io_tx_cond, &ndev->io_tx_lock);
pthread_cond_wait(&ndev->io_tx_cond, &ndev->io_tx_lock.mutex);
mutex_unlock(&ndev->io_tx_lock);
while (virt_queue__available(vq)) {
......@@ -252,8 +252,8 @@ fail:
static void virtio_net__io_thread_init(struct kvm *kvm, struct net_dev *ndev)
{
pthread_mutex_init(&ndev->io_tx_lock, NULL);
pthread_mutex_init(&ndev->io_rx_lock, NULL);
mutex_init(&ndev->io_tx_lock);
mutex_init(&ndev->io_rx_lock);
pthread_cond_init(&ndev->io_tx_cond, NULL);
pthread_cond_init(&ndev->io_rx_cond, NULL);
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment