Fix warnings about bad function prototypes.
insertinterestingnamehere committed Nov 3, 2023
1 parent 50b32d3 commit 80033b8
Showing 20 changed files with 79 additions and 69 deletions.
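The warnings come from C's old-style function declarations: an empty parameter list such as qt_get_barrier() declares a function taking an unspecified number of arguments, not zero, so modern compilers (for example clang with -Wstrict-prototypes or the newer -Wdeprecated-non-prototype) flag it. The fix used throughout this commit is to write (void) explicitly and to mark file-local helpers static. A minimal illustration of the warning and the fix — the names below are made up, not taken from the qthreads sources:

    /* Old style: an empty parameter list is a non-prototype declaration,
     * meaning "unspecified arguments", and draws the deprecation warning. */
    int get_count();

    /* Prototype form: explicitly takes no arguments, so there is no warning. */
    int get_count(void);

    /* The definition also uses (void), so declaration and definition agree. */
    int get_count(void) { return 0; }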
1 change: 1 addition & 0 deletions include/Makefile.am
@@ -36,6 +36,7 @@ noinst_HEADERS = \
qt_int_log.h \
qt_io.h \
qt_feb.h \
+qt_locks.h \
qt_syncvar.h \
qt_macros.h \
qt_mpool.h \
9 changes: 9 additions & 0 deletions include/qt_locks.h
@@ -0,0 +1,9 @@
#ifndef _QT_LOCKS_H_
#define _QT_LOCKS_H_
#include "qt_visibility.h"

int INTERNAL spinlocks_finalize(void);
int INTERNAL spinlocks_initialize(void);

#endif // _QT_LOCKS_H_

1 change: 1 addition & 0 deletions include/qt_threadqueue_scheduler.h
@@ -1,5 +1,6 @@
#ifndef _QT_THREADQUEUE_SCHEDULER_H_
#define _QT_THREADQUEUE_SCHEDULER_H_
+#include "qt_shepherd_innards.h"

qthread_shepherd_id_t INTERNAL qt_threadqueue_choose_dest(qthread_shepherd_t * curr_shep);

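Both header changes above serve the same goal as the (void) fixes: every externally visible function should have one authoritative prototype that both its callers and its definition see. The new include/qt_locks.h declares spinlocks_initialize/spinlocks_finalize (replacing the local extern declarations that the src/qthread.c hunk below removes), and qt_threadqueue_scheduler.h now pulls in qt_shepherd_innards.h, presumably so the header is self-contained for the threadqueue files that now include it. A rough sketch of the pattern with invented names (widget.h, widget.c and main.c are not real qthreads files):

    /* widget.h -- one authoritative prototype, visible to callers and to the
     * file that defines the function. */
    #ifndef WIDGET_H
    #define WIDGET_H
    #include <stdint.h>   /* a header includes what its own declarations need */
    int32_t widget_initialize(void);
    int32_t widget_finalize(void);
    #endif

    /* widget.c -- the defining file includes its own header, so the compiler
     * checks the definition against the declaration. */
    #include "widget.h"
    int32_t widget_initialize(void) { return 0; }
    int32_t widget_finalize(void) { return 0; }

    /* main.c -- callers include the header instead of writing local
     * "extern int widget_initialize();" declarations. */
    #include "widget.h"
    int main(void) { widget_initialize(); return widget_finalize(); }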
33 changes: 17 additions & 16 deletions src/locks.c
@@ -12,6 +12,7 @@
#include "qt_hash.h"
#include "qt_alloc.h"
#include "qt_feb.h"
+#include "qt_locks.h"

#include <stdbool.h>
#include <assert.h>
@@ -30,14 +31,14 @@ static void qthread_spinlock_destroy_fn(qthread_spinlock_t *l) {
QTHREAD_TRYLOCK_DESTROY_PTR(&l->lock);
}

-INTERNAL qthread_spinlock_t * lock_hashmap_get(const aligned_t * key) {
+static INTERNAL qthread_spinlock_t * lock_hashmap_get(const aligned_t * key) {
if(!qthread_spinlock_buckets)
return NULL;

return qt_hash_get(qthread_spinlock_buckets[LOCKBIN(key)], key);
}

-INTERNAL int lock_hashmap_put(const aligned_t * key, qthread_spinlock_t * val) {
+static INTERNAL int lock_hashmap_put(const aligned_t * key, qthread_spinlock_t * val) {
if(!qthread_spinlock_buckets)
return QTHREAD_OPFAIL;

@@ -47,7 +48,7 @@ INTERNAL int lock_hashmap_put(const aligned_t * key, qthread_spinlock_t * val) {
return QTHREAD_OPFAIL;
}

-INTERNAL int lock_hashmap_remove(const aligned_t * key) {
+static INTERNAL int lock_hashmap_remove(const aligned_t * key) {
if(!qthread_spinlock_buckets)
return QTHREAD_OPFAIL;

@@ -57,16 +58,16 @@ INTERNAL int lock_hashmap_remove(const aligned_t * key) {
return QTHREAD_OPFAIL;
}

-INTERNAL bool is_spin_lock_hashed(const aligned_t * a) {
+static INTERNAL bool is_spin_lock_hashed(const aligned_t * a) {
return (NULL != lock_hashmap_get(a));
}

-INTERNAL int spinlocks_initialize() {
+INTERNAL int spinlocks_initialize(void) {
qthread_spinlock_buckets = NULL;
return QTHREAD_SUCCESS;
}

-INTERNAL int spinlocks_finalize() {
+INTERNAL int spinlocks_finalize(void) {
if(qthread_spinlock_buckets){
for (unsigned i = 0; i < QTHREAD_LOCKING_STRIPES; i++) {
assert(qthread_spinlock_buckets[i]);
@@ -81,7 +82,7 @@ INTERNAL int spinlocks_finalize() {

/* locks over addresses using internal hashmaps */

-INTERNAL int spinlock_init_hashed(const aligned_t * a, const bool is_recursive) {
+static INTERNAL int spinlock_init_hashed(const aligned_t * a, const bool is_recursive) {
uint_fast8_t need_sync = 1;

if(!qthread_spinlock_buckets){
@@ -105,11 +106,11 @@ INTERNAL int spinlock_init_hashed(const aligned_t * a, const bool is_recursive)
return QTHREAD_OPFAIL;
}

-INTERNAL int spinlock_destroy_hashed(const aligned_t * a) {
+static INTERNAL int spinlock_destroy_hashed(const aligned_t * a) {
return lock_hashmap_remove(a);
}

-INTERNAL int spinlock_lock_hashed(const aligned_t * a) {
+static INTERNAL int spinlock_lock_hashed(const aligned_t * a) {
qthread_spinlock_t * l = lock_hashmap_get(a);
if (l != NULL) {
if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
@@ -130,7 +131,7 @@ INTERNAL int spinlock_lock_hashed(const aligned_t * a) {
return QTHREAD_OPFAIL;
}

-INTERNAL int spinlock_trylock_hashed(const aligned_t * a) {
+static INTERNAL int spinlock_trylock_hashed(const aligned_t * a) {
qthread_spinlock_t * l = lock_hashmap_get(a);
if (l != NULL) {
if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
@@ -155,7 +156,7 @@ INTERNAL int spinlock_trylock_hashed(const aligned_t * a) {
return QTHREAD_OPFAIL;
}

-INTERNAL int spinlock_unlock_hashed(const aligned_t * a) {
+static INTERNAL int spinlock_unlock_hashed(const aligned_t * a) {
qthread_spinlock_t * l = lock_hashmap_get(a);
if (l != NULL) {
if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
@@ -180,7 +181,7 @@ INTERNAL int spinlock_unlock_hashed(const aligned_t * a) {

/* locks over lock types externally allocated */

-INTERNAL int spinlock_init(qthread_spinlock_t * a, const bool is_recursive) {
+static INTERNAL int spinlock_init(qthread_spinlock_t * a, const bool is_recursive) {
if (is_recursive) {
const qthread_spinlock_t init_mutex = QTHREAD_RECURSIVE_MUTEX_INITIALIZER;
memcpy(a, &init_mutex, sizeof(qthread_spinlock_t));
@@ -191,11 +192,11 @@ INTERNAL int spinlock_init(qthread_spinlock_t * a, const bool is_recursive) {
return QTHREAD_SUCCESS;
}

-INTERNAL int spinlock_destroy(qthread_spinlock_t * a) {
+static INTERNAL int spinlock_destroy(qthread_spinlock_t * a) {
return QTHREAD_SUCCESS;
}

-INTERNAL int spinlock_lock(qthread_spinlock_t * a) {
+static INTERNAL int spinlock_lock(qthread_spinlock_t * a) {
qthread_spinlock_t * l = a;
if (l != NULL) {
if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
@@ -216,7 +217,7 @@ INTERNAL int spinlock_lock(qthread_spinlock_t * a) {
return QTHREAD_OPFAIL;
}

-INTERNAL int spinlock_trylock(qthread_spinlock_t * a) {
+static INTERNAL int spinlock_trylock(qthread_spinlock_t * a) {
qthread_spinlock_t * l = a;
if (l != NULL) {
if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
@@ -241,7 +242,7 @@ INTERNAL int spinlock_trylock(qthread_spinlock_t * a) {
return QTHREAD_OPFAIL;
}

-INTERNAL int spinlock_unlock(qthread_spinlock_t * a) {
+static INTERNAL int spinlock_unlock(qthread_spinlock_t * a) {
qthread_spinlock_t * l = a;
if (l != NULL) {
if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
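A note on the pattern in src/locks.c: these spinlock helpers are only used inside this file, so making them static gives them internal linkage. That removes the need for a header prototype altogether, which is what warnings like -Wmissing-prototypes ask for when an external function is defined without a previous declaration. A small sketch of the distinction with made-up names (not the qthreads helpers):

    /* External linkage with no previous declaration: -Wmissing-prototypes
     * reports "no previous prototype for function 'bump'". */
    int bump(int x) { return x + 1; }

    /* Internal linkage: the function is private to this file and needs no
     * prototype in any header, so the warning does not apply. */
    static int bump_quietly(int x) { return x + 1; }

    /* A public entry point keeps a declaration before its definition. */
    int bump_twice(int x);
    int bump_twice(int x) { return bump(bump_quietly(x)); }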
6 changes: 2 additions & 4 deletions src/qthread.c
@@ -70,6 +70,7 @@
#include "qt_envariables.h"
#include "qt_queue.h"
#include "qt_feb.h"
+#include "qt_locks.h"
#include "qt_syncvar.h"
#include "qt_spawncache.h"
#ifdef QTHREAD_MULTINODE
@@ -131,9 +132,6 @@ int GUARD_PAGES = 1;
#define GUARD_PAGES 0
#endif

-extern int INTERNAL spinlocks_finalize();
-extern int INTERNAL spinlocks_initialize();

/* Internal Prototypes */
#ifdef QTHREAD_MAKECONTEXT_SPLIT
static void qthread_wrapper(unsigned int high,
@@ -3053,7 +3051,7 @@ void qt_set_barrier(qt_barrier_t *bar)
me->rdata->barrier = bar;
} /*}}} */

-qt_barrier_t *qt_get_barrier()
+qt_barrier_t *qt_get_barrier(void)
{ /*{{{ */
qthread_t *me = qthread_internal_self();

2 changes: 1 addition & 1 deletion src/qtimer/gettime.c
@@ -72,7 +72,7 @@ double qtimer_secs(qtimer_t q)
return (q->stop.tv_sec + q->stop.tv_nsec * 1e-9) - (q->start.tv_sec + q->start.tv_nsec * 1e-9);
}

-qtimer_t qtimer_create()
+qtimer_t qtimer_create(void)
{
qtimer_t ret = qt_calloc(1, sizeof(struct qtimer_s));

2 changes: 2 additions & 0 deletions src/sincs/donecount.c
@@ -160,6 +160,8 @@ void API_FUNC qt_sinc_reset(qt_sinc_t *sinc_,
} /*}}}*/

// add akp for power throttling
+void API_FUNC qt_sinc_resize(qt_sinc_t *sinc_,
+const size_t diff);
void API_FUNC qt_sinc_resize(qt_sinc_t *sinc_,
const size_t diff)
{ /*{{{*/
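qt_sinc_resize keeps external linkage (the trailing comment suggests it is used for power throttling elsewhere), but it apparently is not declared in any header this file includes, so the commit adds a forward declaration right before the definition to give it a visible prototype. The same shape with a hypothetical function:

    /* A forward declaration immediately before the definition gives the
     * definition a previous prototype without touching the public headers. */
    void report_power_usage(double watts);

    void report_power_usage(double watts) {
        (void)watts;   /* placeholder body for the sketch */
    }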
4 changes: 2 additions & 2 deletions src/syscalls/nanosleep.c
@@ -21,7 +21,8 @@
#include "qthread_innards.h" /* for qlib */
#include "qt_qthread_mgmt.h"

-int qt_nanosleep(const struct timespec *rqtp,
+#if HAVE_SYSCALL && HAVE_DECL_SYS_NANOSLEEP
+static int qt_nanosleep(const struct timespec *rqtp,
struct timespec *rmtp)
{
if (qt_blockable()) {
@@ -48,7 +49,6 @@ int qt_nanosleep(const struct timespec *rqtp,
}
}

-#if HAVE_SYSCALL && HAVE_DECL_SYS_NANOSLEEP
int nanosleep(const struct timespec *rqtp,
struct timespec *rmtp)
{
4 changes: 2 additions & 2 deletions src/syscalls/sleep.c
@@ -20,7 +20,8 @@
#include "qthread_innards.h" /* for qlib */
#include "qt_qthread_mgmt.h"

-unsigned int qt_sleep(unsigned int seconds)
+#if HAVE_SYSCALL && HAVE_DECL_SYS_SLEEP
+static unsigned int qt_sleep(unsigned int seconds)
{
if (qt_blockable()) {
qtimer_t t = qtimer_create();
@@ -36,7 +37,6 @@ unsigned int qt_sleep(unsigned int seconds)
}
}

-#if HAVE_SYSCALL && HAVE_DECL_SYS_SLEEP
unsigned int sleep(unsigned int seconds)
{
if (qt_blockable()) {
5 changes: 2 additions & 3 deletions src/syscalls/usleep.c
@@ -19,7 +19,8 @@
#include "qthread_innards.h" /* for qlib */
#include "qt_qthread_mgmt.h"

-int qt_usleep(useconds_t useconds)
+#if HAVE_SYSCALL && HAVE_DECL_SYS_USLEEP
+static int qt_usleep(useconds_t useconds)
{
if (qt_blockable()) {
qtimer_t t = qtimer_create();
@@ -36,8 +37,6 @@ int qt_usleep(useconds_t useconds)
}
}


-#if HAVE_SYSCALL && HAVE_DECL_SYS_USLEEP
int usleep(useconds_t useconds)
{
if (qt_blockable()) {
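The three syscall wrappers (nanosleep.c, sleep.c, usleep.c) get the same two-part treatment: the internal qt_* helper appears to be called only from the interposed libc entry point defined under #if HAVE_SYSCALL && HAVE_DECL_SYS_*, so it is moved inside that guard and made static. When the guard is false the helper is not compiled at all, and when it is true it is file-local and needs no separate prototype. Roughly, with a fictitious HAVE_INTERPOSE_SNOOZE guard and snooze() entry point:

    #if HAVE_INTERPOSE_SNOOZE          /* hypothetical configure-time guard */
    /* File-local helper: only built when the wrapper below is built. */
    static unsigned int qt_snooze(unsigned int seconds) {
        return seconds;                /* sketch: the real code blocks the calling task */
    }

    /* Interposed entry point; the real wrappers pick up their prototypes from
     * the libc headers, so the fictitious snooze() is declared here instead. */
    unsigned int snooze(unsigned int seconds);
    unsigned int snooze(unsigned int seconds) {
        return qt_snooze(seconds);
    }
    #endif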
23 changes: 12 additions & 11 deletions src/threadqueues/distrib_threadqueues.c
@@ -20,6 +20,7 @@
#include "qt_asserts.h"
#include "qt_prefetch.h"
#include "qt_threadqueues.h"
+#include "qt_threadqueue_scheduler.h"
#include "qt_envariables.h"
#include "qt_debug.h"
#ifdef QTHREAD_USE_EUREKAS
@@ -76,7 +77,7 @@ qt_threadqueue_pools_t generic_threadqueue_pools;
#define mycounter(q) (q->w_inds[qthread_worker(NULL) % (qlib->nshepherds * qlib->nworkerspershep)].n)
#define myqueue(q) (q->t + mycounter(q))

-static qt_threadqueue_t* alloc_threadqueue(){
+static qt_threadqueue_t* alloc_threadqueue(void){
qt_threadqueue_t* t = (qt_threadqueue_t *)qt_mpool_alloc(generic_threadqueue_pools.queues);
t->num_queues = qlib->nworkerspershep; // Assumption built into api of constant number of workers per shepherd
t->t = qt_malloc(sizeof(qt_threadqueue_internal) * t->num_queues);
@@ -155,12 +156,12 @@ void INTERNAL qt_threadqueue_free(qt_threadqueue_t *qe){
free_threadqueue(qe);
}

-static void qt_threadqueue_subsystem_shutdown(){
+static void qt_threadqueue_subsystem_shutdown(void){
qt_mpool_destroy(generic_threadqueue_pools.nodes);
qt_mpool_destroy(generic_threadqueue_pools.queues);
}

-void INTERNAL qt_threadqueue_subsystem_init(){
+void INTERNAL qt_threadqueue_subsystem_init(void){
steal_ratio = qt_internal_get_env_num("STEAL_RATIO", 8, 0);
condwait_backoff = qt_internal_get_env_num("CONDWAIT_BACKOFF", 2048, 0);
finalizing = 0;
@@ -177,8 +178,8 @@ ssize_t INTERNAL qt_threadqueue_advisory_queuelen(qt_threadqueue_t *q){

/* Threadqueue operations
* We have 4 basic queue operations, enqueue and dequeue for head and tail */
-void INTERNAL qt_threadqueue_enqueue_tail(qt_threadqueue_t *restrict qe,
-qthread_t *restrict t){
+static void INTERNAL qt_threadqueue_enqueue_tail(qt_threadqueue_t *restrict qe,
+qthread_t *restrict t){
if (t->thread_state == QTHREAD_STATE_TERM_SHEP) {
finalizing = 1;
}
@@ -219,8 +220,8 @@ void INTERNAL qt_threadqueue_enqueue_tail(qt_threadqueue_t *restrict qe,
}
}

-void INTERNAL qt_threadqueue_enqueue_head(qt_threadqueue_t *restrict qe,
-qthread_t *restrict t){
+static void INTERNAL qt_threadqueue_enqueue_head(qt_threadqueue_t *restrict qe,
+qthread_t *restrict t){
if (t->flags & QTHREAD_REAL_MCCOY) { // only needs to be on worker 0 for termination
if(mccoy) {
printf("mccoy thread non-null and trying to set!\n");
@@ -255,7 +256,7 @@ void INTERNAL qt_threadqueue_enqueue_head(qt_threadqueue_t *restrict qe,
}
}

-qt_threadqueue_node_t INTERNAL *qt_threadqueue_dequeue_tail(qt_threadqueue_t *qe){
+static qt_threadqueue_node_t INTERNAL *qt_threadqueue_dequeue_tail(qt_threadqueue_t *qe){
qt_threadqueue_internal* q = myqueue(qe);
mycounter(qe) = (mycounter(qe) + 1) % qe->num_queues;
qt_threadqueue_node_t *node;
@@ -278,7 +279,7 @@ qt_threadqueue_node_t INTERNAL *qt_threadqueue_dequeue_tail(qt_threadqueue_t *qe
return node;
}

-qt_threadqueue_node_t INTERNAL *qt_threadqueue_dequeue_head(qt_threadqueue_t *qe){
+static qt_threadqueue_node_t INTERNAL *qt_threadqueue_dequeue_head(qt_threadqueue_t *qe){
qt_threadqueue_internal* q = myqueue(qe);
mycounter(qe) = (mycounter(qe) + 1) % qe->num_queues;
qt_threadqueue_node_t *node;
@@ -386,10 +387,10 @@ qthread_t INTERNAL *qt_scheduler_get_thread(qt_threadqueue_t *qe,
return t;
}

-void INTERNAL qthread_steal_enable(){
+void INTERNAL qthread_steal_enable(void){
}

-void INTERNAL qthread_steal_disable(){
+void INTERNAL qthread_steal_disable(void){
}

qthread_shepherd_id_t INTERNAL qt_threadqueue_choose_dest(qthread_shepherd_t * curr_shep){
5 changes: 3 additions & 2 deletions src/threadqueues/nemesis_threadqueues.c
@@ -18,6 +18,7 @@
#include "qt_asserts.h"
#include "qt_prefetch.h"
#include "qt_threadqueues.h"
+#include "qt_threadqueue_scheduler.h"
#include "qt_envariables.h"
#include "qt_qthread_struct.h"
#include "qt_debug.h"
@@ -226,8 +227,8 @@ void INTERNAL qt_threadqueue_private_filter(qt_threadqueue_private_t *restrict c
{}
#endif /* ifdef QTHREAD_USE_SPAWNCACHE */

-void INTERNAL qthread_steal_enable() {}
-void INTERNAL qthread_steal_disable() {}
+void INTERNAL qthread_steal_enable(void) {}
+void INTERNAL qthread_steal_disable(void) {}

#ifdef QTHREAD_PARANOIA
static void sanity_check_tq(NEMESIS_queue *q)
7 changes: 4 additions & 3 deletions src/threadqueues/sherwood_threadqueues.c
@@ -22,6 +22,7 @@
#include "qt_asserts.h"
#include "qt_prefetch.h"
#include "qt_threadqueues.h"
+#include "qt_threadqueue_scheduler.h"
#include "qt_envariables.h"
#include "qt_debug.h"
#ifdef QTHREAD_USE_EUREKAS
@@ -472,7 +473,7 @@ void INTERNAL qt_threadqueue_enqueue_yielded(qt_threadqueue_t *restrict q,
!(f &QTHREAD_FUTURE) && !(f &QTHREAD_REAL_MCCOY) && \
!(f &QTHREAD_AGGREGATED))

-qthread_t INTERNAL *qt_init_agg_task() // partly a duplicate from qthread.c
+qthread_t INTERNAL *qt_init_agg_task(void) // partly a duplicate from qthread.c
{
qthread_t *t = ALLOC_QTHREAD();

@@ -1483,12 +1484,12 @@ qthread_t INTERNAL *qt_threadqueue_dequeue_specific(qt_threadqueue_t *q,
return (t);
} /*}}}*/

-void INTERNAL qthread_steal_enable()
+void INTERNAL qthread_steal_enable(void)
{ /*{{{*/
steal_disable = 0;
} /*}}}*/

-void INTERNAL qthread_steal_disable()
+void INTERNAL qthread_steal_disable(void)
{ /*{{{*/
steal_disable = 1;
} /*}}}*/
(The remaining changed files are not shown above.)
