kernel: workq: Fix type errors in delayable work handlers

A common pattern here was to take the work item as the subfield of a
containing object. But the contained field is not a k_work, it's a
k_work_delayable.

Things were working only because the work field happens to be the first
member of k_work_delayable, so both pointers had the same value. Convert
the handlers to use k_work_delayable_from_work() so the code stays
correct if/when that field ever moves within k_work_delayable.

Signed-off-by: Yong Cong Sin <yongcong.sin@gmail.com>
This commit is contained in:
Yong Cong Sin 2022-01-19 12:07:51 +08:00 committed by Christopher Friedt
parent 0d70ee6017
commit 3d61857d2f
27 changed files with 72 additions and 37 deletions

View File

@ -610,7 +610,8 @@ static void ctrl_msg_cleanup(struct gsm_control_msg *entry, bool pending)
/* T2 timeout is for control message retransmits */
static void gsm_mux_t2_timeout(struct k_work *work)
{
struct gsm_mux *mux = CONTAINER_OF(work, struct gsm_mux, t2_timer);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct gsm_mux *mux = CONTAINER_OF(dwork, struct gsm_mux, t2_timer);
uint32_t current_time = k_uptime_get_32();
struct gsm_control_msg *entry, *next;

View File

@ -217,8 +217,9 @@ static void invoke_link_cb(const struct device *dev)
static void monitor_work_handler(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct phy_mii_dev_data *const data =
CONTAINER_OF(work, struct phy_mii_dev_data, monitor_work);
CONTAINER_OF(dwork, struct phy_mii_dev_data, monitor_work);
const struct device *dev = data->dev;
int rc;

View File

@ -186,7 +186,8 @@ static int uart_sam0_tx_halt(struct uart_sam0_dev_data *dev_data)
static void uart_sam0_tx_timeout(struct k_work *work)
{
struct uart_sam0_dev_data *dev_data = CONTAINER_OF(work,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct uart_sam0_dev_data *dev_data = CONTAINER_OF(dwork,
struct uart_sam0_dev_data, tx_timeout_work);
uart_sam0_tx_halt(dev_data);

View File

@ -1234,7 +1234,8 @@ static int uart_stm32_async_tx_abort(const struct device *dev)
static void uart_stm32_async_rx_timeout(struct k_work *work)
{
struct uart_dma_stream *rx_stream = CONTAINER_OF(work,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct uart_dma_stream *rx_stream = CONTAINER_OF(dwork,
struct uart_dma_stream, timeout_work);
struct uart_stm32_data *data = CONTAINER_OF(rx_stream,
struct uart_stm32_data, dma_rx);
@ -1251,7 +1252,8 @@ static void uart_stm32_async_rx_timeout(struct k_work *work)
static void uart_stm32_async_tx_timeout(struct k_work *work)
{
struct uart_dma_stream *tx_stream = CONTAINER_OF(work,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct uart_dma_stream *tx_stream = CONTAINER_OF(dwork,
struct uart_dma_stream, timeout_work);
struct uart_stm32_data *data = CONTAINER_OF(tx_stream,
struct uart_stm32_data, dma_tx);

View File

@ -418,7 +418,8 @@ MODEM_CMD_DEFINE(on_cmd_cipsta)
static void esp_ip_addr_work(struct k_work *work)
{
struct esp_data *dev = CONTAINER_OF(work, struct esp_data,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct esp_data *dev = CONTAINER_OF(dwork, struct esp_data,
ip_addr_work);
int ret;

View File

@ -91,10 +91,11 @@ static void eswifi_off_read_work(struct k_work *work)
int next_timeout_ms = 100;
int err, len;
char *data;
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
LOG_DBG("");
socket = CONTAINER_OF(work, struct eswifi_off_socket, read_work);
socket = CONTAINER_OF(dwork, struct eswifi_off_socket, read_work);
eswifi = eswifi_socket_to_dev(socket);
eswifi_lock(eswifi);

View File

@ -62,8 +62,9 @@ static int compare_udp_data(struct data *data, const char *buf, uint32_t receive
static void wait_reply(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
/* This means that we did not receive response in time. */
struct data *data = CONTAINER_OF(work, struct data, udp.recv);
struct data *data = CONTAINER_OF(dwork, struct data, udp.recv);
LOG_ERR("UDP %s: Data packet not received", data->proto);
@ -73,7 +74,8 @@ static void wait_reply(struct k_work *work)
/*
 * Transmit timer handler: recover the containing data context from the
 * delayable work item and send the next UDP data packet.
 *
 * Fix: the stale pre-conversion CONTAINER_OF(work, ...) line (a leftover
 * removed-diff line) redeclared `data` and resolved the container from a
 * struct k_work pointer; the field is a k_work_delayable, so convert with
 * k_work_delayable_from_work() first.
 */
static void wait_transmit(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct data *data = CONTAINER_OF(dwork, struct data, udp.transmit);

	send_udp_data(data);
}

View File

@ -347,7 +347,8 @@ static void process_tcp6(void)
static void print_stats(struct k_work *work)
{
struct data *data = CONTAINER_OF(work, struct data, tcp.stats_print);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct data *data = CONTAINER_OF(dwork, struct data, tcp.stats_print);
int total_received = atomic_get(&data->tcp.bytes_received);
if (total_received) {

View File

@ -188,7 +188,8 @@ static void process_udp6(void)
static void print_stats(struct k_work *work)
{
struct data *data = CONTAINER_OF(work, struct data, udp.stats_print);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct data *data = CONTAINER_OF(dwork, struct data, udp.stats_print);
int total_received = atomic_get(&data->udp.bytes_received);
if (total_received) {

View File

@ -2583,7 +2583,8 @@ static void att_chan_detach(struct bt_att_chan *chan)
static void att_timeout(struct k_work *work)
{
struct bt_att_chan *chan = CONTAINER_OF(work, struct bt_att_chan,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct bt_att_chan *chan = CONTAINER_OF(dwork, struct bt_att_chan,
timeout_work);
BT_ERR("ATT Timeout");

View File

@ -1520,7 +1520,8 @@ static struct bt_conn *conn_lookup_iso(struct bt_conn *conn)
static void deferred_work(struct k_work *work)
{
struct bt_conn *conn = CONTAINER_OF(work, struct bt_conn, deferred_work);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct bt_conn *conn = CONTAINER_OF(dwork, struct bt_conn, deferred_work);
const struct bt_le_conn_param *param;
BT_DBG("conn %p", conn);

View File

@ -990,7 +990,8 @@ static void sc_indicate_rsp(struct bt_conn *conn,
static void sc_process(struct k_work *work)
{
struct gatt_sc *sc = CONTAINER_OF(work, struct gatt_sc, work);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct gatt_sc *sc = CONTAINER_OF(dwork, struct gatt_sc, work);
uint16_t sc_range[2];
__ASSERT(!atomic_test_bit(sc->flags, SC_INDICATE_PENDING),
@ -1074,8 +1075,9 @@ static bool gatt_ccc_conn_queue_is_empty(void)
static void ccc_delayed_store(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct gatt_ccc_store *ccc_store =
CONTAINER_OF(work, struct gatt_ccc_store, work);
CONTAINER_OF(dwork, struct gatt_ccc_store, work);
for (size_t i = 0; i < CONFIG_BT_MAX_CONN; i++) {
struct bt_conn *conn = ccc_store->conn_list[i];

View File

@ -1022,7 +1022,8 @@ static void smp_pairing_br_complete(struct bt_smp_br *smp, uint8_t status)
static void smp_br_timeout(struct k_work *work)
{
struct bt_smp_br *smp = CONTAINER_OF(work, struct bt_smp_br, work);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct bt_smp_br *smp = CONTAINER_OF(dwork, struct bt_smp_br, work);
BT_ERR("SMP Timeout");

View File

@ -51,10 +51,11 @@
static void proxy_sar_timeout(struct k_work *work)
{
struct bt_mesh_proxy_role *role;
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
BT_WARN("Proxy SAR timeout");
role = CONTAINER_OF(work, struct bt_mesh_proxy_role, sar_timer);
role = CONTAINER_OF(dwork, struct bt_mesh_proxy_role, sar_timer);
if (role->conn) {
bt_conn_disconnect(role->conn,
BT_HCI_ERR_REMOTE_USER_TERM_CONN);

View File

@ -448,7 +448,8 @@ end:
/*
 * Segment retransmit timer handler: recover the seg_tx context from the
 * delayable work item and trigger seg_tx_send_unacked() for it.
 *
 * Fix: the stale pre-conversion CONTAINER_OF(work, ...) line (a leftover
 * removed-diff line) redeclared `tx`; the retransmit field is a
 * k_work_delayable, so it must be recovered via
 * k_work_delayable_from_work() before CONTAINER_OF.
 */
static void seg_retransmit(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct seg_tx *tx = CONTAINER_OF(dwork, struct seg_tx, retransmit);

	seg_tx_send_unacked(tx);
}
@ -1128,7 +1129,8 @@ static void seg_rx_reset(struct seg_rx *rx, bool full_reset)
static void seg_ack(struct k_work *work)
{
struct seg_rx *rx = CONTAINER_OF(work, struct seg_rx, ack);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct seg_rx *rx = CONTAINER_OF(dwork, struct seg_rx, ack);
int32_t timeout;
if (!rx->in_use || rx->block == BLOCK_COMPLETE(rx->seg_n)) {

View File

@ -507,7 +507,8 @@ out:
static void tcp_send_process(struct k_work *work)
{
struct tcp *conn = CONTAINER_OF(work, struct tcp, send_timer);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_timer);
bool unref;
k_mutex_lock(&conn->lock, K_FOREVER);
@ -1061,7 +1062,8 @@ static int tcp_send_queued_data(struct tcp *conn)
static void tcp_cleanup_recv_queue(struct k_work *work)
{
struct tcp *conn = CONTAINER_OF(work, struct tcp, recv_queue_timer);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, recv_queue_timer);
k_mutex_lock(&conn->lock, K_FOREVER);
@ -1077,7 +1079,8 @@ static void tcp_cleanup_recv_queue(struct k_work *work)
static void tcp_resend_data(struct k_work *work)
{
struct tcp *conn = CONTAINER_OF(work, struct tcp, send_data_timer);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_data_timer);
bool conn_unref = false;
int ret;
@ -1133,7 +1136,8 @@ static void tcp_resend_data(struct k_work *work)
static void tcp_timewait_timeout(struct k_work *work)
{
struct tcp *conn = CONTAINER_OF(work, struct tcp, timewait_timer);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, timewait_timer);
NET_DBG("conn: %p %s", conn, log_strdup(tcp_conn_state(conn, NULL)));
@ -1151,7 +1155,8 @@ static void tcp_establish_timeout(struct tcp *conn)
static void tcp_fin_timeout(struct k_work *work)
{
struct tcp *conn = CONTAINER_OF(work, struct tcp, fin_timer);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, fin_timer);
if (conn->state == TCP_SYN_RECEIVED) {
tcp_establish_timeout(conn);

View File

@ -122,7 +122,8 @@ static void inteval_timeout(struct net_trickle *trickle)
static void trickle_timeout(struct k_work *work)
{
struct net_trickle *trickle = CONTAINER_OF(work,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct net_trickle *trickle = CONTAINER_OF(dwork,
struct net_trickle,
timer);

View File

@ -287,7 +287,8 @@ static inline void clear_reass_cache(uint16_t size, uint16_t tag)
*/
static void reass_timeout(struct k_work *work)
{
struct frag_cache *cache = CONTAINER_OF(work, struct frag_cache, timer);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct frag_cache *cache = CONTAINER_OF(dwork, struct frag_cache, timer);
if (cache->pkt) {
net_pkt_unref(cache->pkt);

View File

@ -94,7 +94,8 @@ static void fsm_send_configure_req(struct ppp_fsm *fsm, bool retransmit)
static void ppp_fsm_timeout(struct k_work *work)
{
struct ppp_fsm *fsm = CONTAINER_OF(work, struct ppp_fsm, timer);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct ppp_fsm *fsm = CONTAINER_OF(dwork, struct ppp_fsm, timer);
NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm,
ppp_state_str(fsm->state), fsm->state);

View File

@ -408,7 +408,8 @@ const struct ppp_protocol_handler *ppp_lcp_get(void)
static void ppp_startup(struct k_work *work)
{
struct ppp_context *ctx = CONTAINER_OF(work, struct ppp_context,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct ppp_context *ctx = CONTAINER_OF(dwork, struct ppp_context,
startup);
int count = 0;

View File

@ -1058,8 +1058,9 @@ int dns_resolve_cancel(struct dns_resolve_context *ctx, uint16_t dns_id)
static void query_timeout(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct dns_pending_query *pending_query =
CONTAINER_OF(work, struct dns_pending_query, timer);
CONTAINER_OF(dwork, struct dns_pending_query, timer);
int ret;
/* We have to take the lock as we're inspecting protected content

View File

@ -467,8 +467,9 @@ static int http_wait_data(int sock, struct http_request *req)
/*
 * HTTP request timeout handler: recover the client-internal data from the
 * delayable work item and close its socket to abort the pending transfer.
 *
 * Fix: a stale pre-conversion CONTAINER_OF(work, ...) line (a leftover
 * removed-diff line) sat alongside the new one as a dangling statement;
 * the work field is a k_work_delayable, so convert with
 * k_work_delayable_from_work() and drop the old line.
 */
static void http_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct http_client_internal_data *data =
		CONTAINER_OF(dwork, struct http_client_internal_data, work);

	(void)zsock_close(data->sock);
}

View File

@ -177,7 +177,8 @@ static int onoff_post_write_cb(uint16_t obj_inst_id,
static void buzzer_work_cb(struct k_work *work)
{
struct ipso_buzzer_data *buzzer = CONTAINER_OF(work,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct ipso_buzzer_data *buzzer = CONTAINER_OF(dwork,
struct ipso_buzzer_data,
buzzer_work);
stop_buzzer(buzzer, false);

View File

@ -272,7 +272,8 @@ static int trigger_counter_post_write_cb(uint16_t obj_inst_id,
static void timer_work_cb(struct k_work *work)
{
struct ipso_timer_data *timer = CONTAINER_OF(work,
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct ipso_timer_data *timer = CONTAINER_OF(dwork,
struct ipso_timer_data,
timer_work);
stop_timer(timer, false);

View File

@ -53,8 +53,8 @@ static void pm_device_runtime_state_set(struct pm_device *pm)
static void pm_work_handler(struct k_work *work)
{
struct pm_device *pm = CONTAINER_OF(work,
struct pm_device, work);
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work);
(void)k_mutex_lock(&pm->lock, K_FOREVER);
pm_device_runtime_state_set(pm);

View File

@ -717,8 +717,9 @@ struct test_msg_waitall_data {
static void test_msg_waitall_tx_work_handler(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct test_msg_waitall_data *test_data =
CONTAINER_OF(work, struct test_msg_waitall_data, tx_work);
CONTAINER_OF(dwork, struct test_msg_waitall_data, tx_work);
if (test_data->retries > 0) {
test_send(test_data->sock, test_data->data + test_data->offset, 1, 0);

View File

@ -200,8 +200,9 @@ struct test_msg_waitall_data {
static void test_msg_waitall_tx_work_handler(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct test_msg_waitall_data *test_data =
CONTAINER_OF(work, struct test_msg_waitall_data, tx_work);
CONTAINER_OF(dwork, struct test_msg_waitall_data, tx_work);
if (test_data->retries > 0) {
test_send(test_data->sock, test_data->data + test_data->offset, 1, 0);
@ -376,8 +377,9 @@ struct test_msg_trunc_data {
/*
 * Delayed TX work handler for the MSG_TRUNC test: recover the test data
 * from the delayable work item and send the full payload in one call.
 *
 * Fix: a stale pre-conversion CONTAINER_OF(work, ...) line (a leftover
 * removed-diff line) sat alongside the new one as a dangling statement;
 * the tx_work field is a k_work_delayable, so convert with
 * k_work_delayable_from_work() and drop the old line.
 */
static void test_msg_trunc_tx_work_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct test_msg_trunc_data *test_data =
		CONTAINER_OF(dwork, struct test_msg_trunc_data, tx_work);

	test_send(test_data->sock, test_data->data, test_data->datalen, 0);
}