| Index: kernel/port.c |
| diff --git a/kernel/port.c b/kernel/port.c |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..b879c4f67128202c27f4ef6d542961af3c999e26 |
| --- /dev/null |
| +++ b/kernel/port.c |
| @@ -0,0 +1,362 @@ |
| +/* |
| + * Copyright (c) 2015 Carlos Pizano-Uribe cpu@chromium.org |
| + * |
| + * Permission is hereby granted, free of charge, to any person obtaining |
| + * a copy of this software and associated documentation files |
| + * (the "Software"), to deal in the Software without restriction, |
| + * including without limitation the rights to use, copy, modify, merge, |
| + * publish, distribute, sublicense, and/or sell copies of the Software, |
| + * and to permit persons to whom the Software is furnished to do so, |
| + * subject to the following conditions: |
| + * |
| + * The above copyright notice and this permission notice shall be |
| + * included in all copies or substantial portions of the Software. |
| + * |
| + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. |
| + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY |
| + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
| + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
| + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| + */ |
| + |
| +/** |
| + * @file |
| + * @brief Port object functions |
| + * @defgroup port Ports |
| + * |
| + */ |
| + |
| +#include <kernel/port.h> |
| +#include <debug.h> |
| +#include <list.h> |
| +#include <malloc.h> |
| +#include <assert.h> |
| +#include <string.h> |
| +#include <err.h> |
| +#include <kernel/thread.h> |
| + |
| +#define WRITEPORT_MAGIC 'prtw' |
| +#define READPORT_MAGIC 'prtr' |
| +#define PORTGROUP_MAGIC 'prtg' |
| + |
| +#define PORT_BUFF_SIZE 8 |
| +#define PORT_BUFF_SIZE_BIG 64 |
| + |
| +#define RESCHEDULE_POLICY true |
| + |
| +typedef struct { |
| + uint max; |
| + uint head; |
| + uint tail; |
| + port_packet_t packet[1]; |
| +} port_buf_t; |
| + |
| +typedef struct { |
| + int magic; |
| + struct list_node node; |
| + port_buf_t* buf; |
| + struct list_node rp_list; |
| + char name[PORT_NAME_LEN]; |
| +} write_port_t; |
| + |
| +// todo (cpu) add the |mode| bit to the write port and check |
| +// that unicast ports have only one read port. |
| + |
| +typedef struct { |
| + int magic; |
| + wait_queue_t wait; |
| + struct list_node rp_list; |
| +} port_group_t; |
| + |
| +typedef struct { |
| + int magic; |
| + struct list_node node; |
| + port_buf_t* buf; |
| + void* ctx; |
| + wait_queue_t wait; |
| + write_port_t* wport; |
| + port_group_t* gport; |
| +} read_port_t; |
| + |
| + |
| +static struct list_node write_port_list; |
| + |
| + |
| +static port_buf_t* make_buf(uint pk_count) |
| +{ |
| + uint size = sizeof(port_buf_t) + ((pk_count - 1) * sizeof(port_packet_t)); |
| + port_buf_t* buf = (port_buf_t*) malloc(size); |
| + if (!buf) |
| + return NULL; |
| + buf->max = pk_count; |
| + buf->head = buf->tail = 0; |
| + return buf; |
| +} |
| + |
| +static int buf_write(const port_packet_t* packets, size_t count, port_buf_t* buf) |
| +{ |
| + // todo (cpu). circular buffer write. |
| + return 0; |
| +} |
| + |
| +static int buf_read(port_buf_t* buf, port_result_t* pr) |
| +{ |
| + // todo (cpu). circular buffer read. |
| + return 0; |
| +} |
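
buf_write() and buf_read() are still TODO in this patch. For reference, a minimal ring-buffer sketch that matches the port_buf_t layout above; the drop-on-overflow policy and the port_result_t |packet| field are assumptions for illustration, not taken from this change:

/* Sketch only: |head| is the producer index, |tail| the consumer index,
 * both kept in [0, max), with one slot left empty so that head == tail
 * means "empty". Returns the number of packets written/read. */
static int buf_write_sketch(port_buf_t* buf, const port_packet_t* packets, size_t count)
{
    uint used = (buf->head >= buf->tail) ? (buf->head - buf->tail)
                                         : (buf->max - buf->tail + buf->head);
    uint avail = buf->max - 1 - used;
    if (avail < count)
        return 0;   // does not fit; drop the write (policy is an assumption)

    for (size_t ix = 0; ix != count; ix++) {
        buf->packet[buf->head] = packets[ix];
        buf->head = (buf->head + 1) % buf->max;
    }
    return (int)count;
}

static int buf_read_sketch(port_buf_t* buf, port_result_t* pr)
{
    if (buf->tail == buf->head)
        return 0;   // empty, nothing read

    pr->packet = buf->packet[buf->tail];    // assumes a |packet| field
    buf->tail = (buf->tail + 1) % buf->max;
    return 1;       // one packet read
}
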
| + |
| +// must be called before any use of ports. |
| +void port_init(void) |
| +{ |
| + list_initialize(&write_port_list); |
| +} |
| + |
| +status_t port_create(const char* name, port_mode_t mode, port_t* port) |
| +{ |
| + if (!name || !port) |
| + return ERR_INVALID_ARGS; |
| + |
| + // only unicast ports can have a large buffer. |
| + if (mode & PORT_MODE_BROADCAST) { |
| + if (mode & PORT_MODE_BIG_BUFFER) |
| + return ERR_INVALID_ARGS; |
| + } |
| + |
| + // lookup for existing port, return that if found. |
| + write_port_t* wp = NULL; |
| + THREAD_LOCK(state1); |
| + list_for_every_entry(&write_port_list, wp, write_port_t, node) { |
| + if (strcmp(wp->name, name) == 0) { |
| + THREAD_UNLOCK(state1); |
| + *port = (void*)wp; |
| + return NO_ERROR; |
| + } |
| + } |
| + THREAD_UNLOCK(state1); |
| + |
| + // not found, create the write port and the circular buffer. |
| + wp = malloc(sizeof(write_port_t)); |
| + if (!wp) |
| + return ERR_NO_MEMORY; |
| + |
| + memset(wp, 0, sizeof(write_port_t)); |
| + wp->magic = WRITEPORT_MAGIC; |
| + strlcpy(wp->name, name, sizeof(wp->name)); |
| + list_initialize(&wp->rp_list); |
| + |
| + uint pk_count = (mode & PORT_MODE_BIG_BUFFER) ? PORT_BUFF_SIZE_BIG : PORT_BUFF_SIZE; |
| + wp->buf = make_buf(pk_count); |
| + if (!wp->buf) { |
| + free(wp); |
| + return ERR_NO_MEMORY; |
| + } |
| + |
| + // race condition! a port with the same name could have been created |
| + // by another thread at this point. |
| + THREAD_LOCK(state2); |
| + list_add_tail(&write_port_list, &wp->node); |
| + THREAD_UNLOCK(state2); |
| + |
| + *port = (void*)wp; |
| + return NO_ERROR; |
| +} |
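
The race flagged in the comment above could be closed by re-checking for a same-named port while holding the lock, before inserting. A sketch of how the tail of port_create() might look under that approach (not what this patch does):

    // Sketch only: recheck under the lock and discard our speculative
    // allocation if another thread won the race.
    THREAD_LOCK(state2);
    write_port_t* existing;
    list_for_every_entry(&write_port_list, existing, write_port_t, node) {
        if (strcmp(existing->name, name) == 0) {
            THREAD_UNLOCK(state2);
            free(wp->buf);
            free(wp);
            *port = (void*)existing;
            return NO_ERROR;
        }
    }
    list_add_tail(&write_port_list, &wp->node);
    THREAD_UNLOCK(state2);

    *port = (void*)wp;
    return NO_ERROR;
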
| + |
| +status_t port_open(const char* name, void* ctx, port_t* port) |
| +{ |
| + if (!name || !port) |
| + return ERR_INVALID_ARGS; |
| + |
| + // assume success; create the read port and buffer now. |
| + read_port_t* rp = malloc(sizeof(read_port_t)); |
| + if (!rp) |
| + return ERR_NO_MEMORY; |
| + |
| + memset(rp, 0, sizeof(read_port_t)); |
| + rp->magic = READPORT_MAGIC; |
| + wait_queue_init(&rp->wait); |
| + rp->ctx = ctx; |
| + |
| + // |buf| might not be needed, but we always allocate outside the lock. |
| + port_buf_t* buf = make_buf(PORT_BUFF_SIZE); |
| + |
| + // find the named write port and associate it with read port. |
| + status_t rc = ERR_NOT_FOUND; |
| + |
| + THREAD_LOCK(state); |
| + write_port_t* wp = NULL; |
| + list_for_every_entry(&write_port_list, wp, write_port_t, node) { |
| + if (strcmp(wp->name, name) == 0) { |
| + // found; add read port to write port list. |
| + rp->wport = wp; |
| + if (wp->buf) { |
| + // this is the first read port; transfer the circular buffer. |
| + list_add_tail(&wp->rp_list, &rp->node); |
| + rp->buf = wp->buf; |
| + wp->buf = NULL; |
| + rc = NO_ERROR; |
| + } else if (buf) { |
| + // not first read port; use the new (small) circular buffer. |
| + list_add_tail(&wp->rp_list, &rp->node); |
| + rp->buf = buf; |
| + buf = NULL; |
| + rc = NO_ERROR; |
| + } else { |
| + // |buf| allocation failed and the buffer was needed. |
| + rc = ERR_NO_MEMORY; |
| + } |
| + break; |
| + } |
| + } |
| + THREAD_UNLOCK(state); |
| + |
| + if (buf) |
| + free(buf); |
| + |
| + if (rc == NO_ERROR) { |
| + *port = (void*)rp; |
| + } else { |
| + free(rp); |
| + } |
| + return rc; |
| +} |
| + |
| +status_t port_group(port_t* ports, size_t count, port_t* group) |
| +{ |
| + // assume success; create port group now. |
| + port_group_t* pg = malloc(sizeof(port_group_t)); |
| + if (!pg) |
| + return ERR_NO_MEMORY; |
| + |
| + memset(pg, 0, sizeof(port_group_t)); |
| + pg->magic = PORTGROUP_MAGIC; |
| + wait_queue_init(&pg->wait); |
| + list_initialize(&pg->rp_list); |
| + |
| + status_t rc = NO_ERROR; |
| + |
| + THREAD_LOCK(state); |
| + for (size_t ix = 0; ix != count; ix++) { |
| + read_port_t* rp = (read_port_t*)ports[ix]; |
| + if ((rp->magic != READPORT_MAGIC) || rp->gport) { |
| + // wrong type of port, or port already part of a group, |
| + // in any case, undo the changes to the previous read ports. |
| + for (size_t jx = 0; jx != ix; jx++) { |
| + ((read_port_t*)ports[jx])->gport = NULL; |
| + } |
| + rc = ERR_BAD_HANDLE; |
| + break; |
| + } |
| + // link port group and read port. |
| + rp->gport = pg; |
| + list_add_tail(&pg->rp_list, &rp->node); |
| + } |
| + THREAD_UNLOCK(state); |
| + |
| + if (rc == NO_ERROR) { |
| + *group = (void*)pg; |
| + } else { |
| + free(pg); |
| + } |
| + return rc; |
| +} |
| + |
| +status_t port_write(port_t port, const port_packet_t* pk, size_t count) |
| +{ |
| + write_port_t* wp = (write_port_t*)port; |
| + THREAD_LOCK(state); |
| + if (wp->magic != WRITEPORT_MAGIC) { |
| + // wrong port type. |
| + THREAD_UNLOCK(state); |
| + return ERR_BAD_HANDLE; |
| + } |
| + |
| + if (wp->buf) { |
| + // there are no read ports, just write to the buffer. |
| + buf_write(pk, count, wp->buf); |
| + } else { |
| + // there are read ports. for each, write and attempt to wake a thread |
| + // from the port group or from the read port itself. |
| + read_port_t* rp; |
| + list_for_every_entry(&wp->rp_list, rp, read_port_t, node) { |
| + buf_write(pk, count, rp->buf); |
| + |
| + // use a distinct name so it does not shadow the |count| parameter. |
| + int awake = 0; |
| + if (rp->gport) { |
| + awake = wait_queue_wake_one(&rp->gport->wait, RESCHEDULE_POLICY, NO_ERROR); |
| + } |
| + if (!awake) { |
| + wait_queue_wake_one(&rp->wait, RESCHEDULE_POLICY, NO_ERROR); |
| + } |
| + } |
| + } |
| + |
| + THREAD_UNLOCK(state); |
| + return NO_ERROR; |
| +} |
| + |
| +static inline status_t read_no_lock(read_port_t* rp, lk_time_t timeout, port_result_t* result) |
| +{ |
| + int read = buf_read(rp->buf, result); |
| + if (read > 0) { |
| + result->ctx = rp->ctx; |
| + return NO_ERROR; |
| + } else if (read < 0) { |
| + return (status_t)read; |
| + } |
| + // early return allows compiler to elide the rest for the group read case. |
| + if (!timeout) |
| + return ERR_TIMED_OUT; |
| + |
| + status_t rc = wait_queue_block(&rp->wait, timeout); |
| + if (rc != NO_ERROR) |
| + return rc; |
| + // the recursive tail call is normally optimized into a loop by the compiler. |
| + return read_no_lock(rp, timeout, result); |
| +} |
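
The early return and the tail recursion above rely on the compiler to turn read_no_lock() into a loop; the equivalent iterative form, for comparison (sketch, same behavior):

static inline status_t read_no_lock_loop(read_port_t* rp, lk_time_t timeout, port_result_t* result)
{
    for (;;) {
        int read = buf_read(rp->buf, result);
        if (read > 0) {
            result->ctx = rp->ctx;
            return NO_ERROR;
        }
        if (read < 0)
            return (status_t)read;
        // no data: polling reads (timeout == 0) give up immediately,
        // blocking reads wait for a writer to wake us and then retry.
        if (!timeout)
            return ERR_TIMED_OUT;
        status_t rc = wait_queue_block(&rp->wait, timeout);
        if (rc != NO_ERROR)
            return rc;
    }
}
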
| + |
| +status_t port_read(port_t port, lk_time_t timeout, port_result_t* result) |
| +{ |
| + status_t rc = ERR_GENERIC; |
| + read_port_t* rp = (read_port_t*)port; |
| + |
| + THREAD_LOCK(state); |
| + if (rp->magic == READPORT_MAGIC) { |
| + // dealing with a single port. |
| + rc = read_no_lock(rp, timeout, result); |
| + } else if (rp->magic == PORTGROUP_MAGIC) { |
| + // dealing with a port group. |
| + port_group_t* pg = (port_group_t*)port; |
| + do { |
| + // read each port with no timeout. |
| + list_for_every_entry(&pg->rp_list, rp, read_port_t, node) { |
| + rc = read_no_lock(rp, 0, result); |
| + if (rc != ERR_TIMED_OUT) |
| + goto read_exit; |
| + } |
| + // no data, block on the group waitqueue. |
| + rc = wait_queue_block(&pg->wait, timeout); |
| + } while (rc == NO_ERROR); |
| + } else { |
| + // wrong port type. |
| + rc = ERR_BAD_HANDLE; |
| + } |
| + |
| +read_exit: |
| + THREAD_UNLOCK(state); |
| + return rc; |
| +} |
| + |
| +status_t port_destroy(port_t port) |
| +{ |
| + // todo (cpu) |
| + return NO_ERROR; |
| +} |
| + |
| +status_t port_close(port_t port) |
| +{ |
| + // todo (cpu) |
| + return NO_ERROR; |
| +} |
| + |
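
For context, a minimal producer/consumer sketch of the API this file implements. PORT_MODE_UNICAST, INFINITE_TIME and the result.packet field are taken from the port/thread headers by assumption; they do not appear in this diff:

// Producer: create (or reuse) a named write port and push one packet.
static void producer(void)
{
    port_t wport;
    if (port_create("example-port", PORT_MODE_UNICAST, &wport) != NO_ERROR)
        return;

    port_packet_t pk;
    memset(&pk, 0, sizeof(pk));
    port_write(wport, &pk, 1);
}

// Consumer: open the same port by name and block until data arrives.
static void consumer(void)
{
    port_t rport;
    if (port_open("example-port", NULL, &rport) != NO_ERROR)
        return;

    port_result_t result;
    if (port_read(rport, INFINITE_TIME, &result) == NO_ERROR) {
        // result.packet holds the payload; result.ctx is the ctx from port_open().
    }
    port_close(rport);
}
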