return index + i;
}
-/*
- * visor_copy_fragsinfo_from_skb(
- * @skb_in: skbuff that we are pulling the frags from
- * @firstfraglen: length of first fragment in skb
- * @frags_max: max len of frags array
- * @frags: frags array filled in on output
+/* visor_copy_fragsinfo_from_skb - copy fragment list in the SKB to a phys_info
+ * array that the IOPART understands
+ * @skb: Skbuff that we are pulling the frags from.
+ * @firstfraglen: Length of first fragment in skb.
+ * @frags_max: Max len of frags array.
+ * @frags: Frags array filled in on output.
*
- * Copy the fragment list in the SKB to a phys_info
- * array that the IOPART understands.
- * Return value indicates number of entries filled in frags
- * Negative values indicate an error.
+ * Return: Positive integer indicating number of entries filled in frags on
+ * success, negative integer on error.
*/
static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
unsigned int firstfraglen,
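The body of this helper is elided above, so for orientation here is a minimal sketch of the frag walk its new description refers to: copy each skb fragment's page/offset/length into an IOPART-visible entry. The phys_info field names (pi_pfn, pi_off, pi_len) are assumed from the iochannel headers, frag->page_offset matches the pre-5.4 skb_frag layout, and the sketch ignores firstfraglen and chained frag lists, which the real helper also deals with.

#include <linux/skbuff.h>

/* Editorial sketch only -- not part of this patch. pi_pfn/pi_off/pi_len are
 * assumed names for the phys_info entry fields the IOPART consumes.
 */
static int sketch_copy_frags(struct sk_buff *skb, unsigned int frags_max,
                             struct phys_info frags[])
{
        unsigned int count = skb_shinfo(skb)->nr_frags;
        unsigned int i;

        if (count > frags_max)
                return -EINVAL;

        for (i = 0; i < count; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                frags[i].pi_pfn = page_to_pfn(skb_frag_page(frag));
                frags[i].pi_off = frag->page_offset;
                frags[i].pi_len = skb_frag_size(frag);
        }
        return count;   /* number of entries filled in */
}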
.write = enable_ints_write,
};
-/*
- * visornic_serverdown_complete - IOPART went down, pause device
- * @work: Work queue it was scheduled on
+/* visornic_serverdown_complete - pause device following IOPART going down
+ * @devdata: Device managed by IOPART.
*
- * The IO partition has gone down and we need to do some cleanup
- * for when it comes back. Treat the IO partition as the link
- * being down.
- * Returns void.
+ * The IO partition has gone down, and we need to do some cleanup for when it
+ * comes back. Treat the IO partition as the link being down.
*/
static void visornic_serverdown_complete(struct visornic_devdata *devdata)
{
devdata->server_down_complete_func = NULL;
}
-/*
- * visornic_serverdown - Command has notified us that IOPART is down
- * @devdata: device that is being managed by IOPART
+/* visornic_serverdown - Command has notified us that IOPART is down
+ * @devdata: Device managed by IOPART.
+ * @complete_func: Function to call when finished.
+ *
+ * Schedule the work needed to handle the server down request. Make sure we
+ * haven't already handled the server change state event.
*
- * Schedule the work needed to handle the server down request. Make
- * sure we haven't already handled the server change state event.
- * Returns 0 if we scheduled the work, -EINVAL on error.
+ * Return: 0 if we scheduled the work, negative integer on error.
*/
static int visornic_serverdown(struct visornic_devdata *devdata,
visorbus_state_complete_func complete_func)
return err;
}
-/*
- * alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition.
- * @netdev: network adapter the rcv bufs are attached too.
+/* alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition
+ * @netdev: Network adapter the rcv bufs are attached to.
+ *
+ * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
+ * so that it can write rcv data into our memory space.
*
- * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
- * so that it can write rcv data into our memory space.
- * Return pointer to sk_buff
+ * Return: Pointer to sk_buff.
*/
static struct sk_buff *alloc_rcv_buf(struct net_device *netdev)
{
return skb;
}
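For reference, the allocation this comment describes amounts to handing the IO Partition a large, fixed-size skb it can write receive data into. A minimal sketch, assuming a driver-defined RCVPOST_BUF_SIZE constant for the buffer size:

/* Editorial sketch only; RCVPOST_BUF_SIZE is an assumed driver constant. */
static struct sk_buff *sketch_alloc_rcv_buf(struct net_device *netdev)
{
        struct sk_buff *skb;

        /* Allocate a full-size buffer so the MTU can change later. */
        skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
        if (!skb)
                return NULL;
        skb->dev = netdev;
        return skb;
}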
-/*
- * post_skb - post a skb to the IO Partition.
- * @cmdrsp: cmdrsp packet to be send to the IO Partition
- * @devdata: visornic_devdata to post the skb too
- * @skb: skb to give to the IO partition
+/* post_skb - post a skb to the IO Partition
+ * @cmdrsp: Cmdrsp packet to be sent to the IO Partition.
+ * @devdata: visornic_devdata to post the skb to.
+ * @skb: Skb to give to the IO partition.
*
- * Send the skb to the IO Partition.
- * Returns 0 or error
+ * Return: 0 on success, negative integer on error.
*/
static int post_skb(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
struct sk_buff *skb)
return 0;
}
-/*
- * send_enbdis - send NET_RCV_ENBDIS to IO Partition
- * @netdev: netdevice we are enable/disable, used as context
- * return value
- * @state: enable = 1/disable = 0
- * @devdata: visornic device we are enabling/disabling
+/* send_enbdis - send NET_RCV_ENBDIS to IO Partition
+ * @netdev: Netdevice we are enabling/disabling, used as context return value.
+ * @state: Enable = 1/disable = 0.
+ * @devdata: Visornic device we are enabling/disabling.
+ *
+ * Send the enable/disable message to the IO Partition.
*
- * Send the enable/disable message to the IO Partition.
- * Returns 0 or error
+ * Return: 0 on success, negative integer on error.
*/
static int send_enbdis(struct net_device *netdev, int state,
struct visornic_devdata *devdata)
return 0;
}
-/*
- * visornic_disable_with_timeout - Disable network adapter
- * @netdev: netdevice to disable
- * @timeout: timeout to wait for disable
+/* visornic_disable_with_timeout - disable network adapter
+ * @netdev: netdevice to disable.
+ * @timeout: Timeout to wait for disable.
+ *
+ * Disable the network adapter and inform the IO Partition that we are disabled.
+ * Reclaim memory from rcv bufs.
*
- * Disable the network adapter and inform the IO Partition that we
- * are disabled, reclaim memory from rcv bufs.
- * Returns 0 on success, negative for failure of IO Partition
- * responding.
+ * Return: 0 on success, negative integer on failure of IO Partition responding.
*/
static int visornic_disable_with_timeout(struct net_device *netdev,
const int timeout)
return 0;
}
-/*
- * init_rcv_bufs -- initialize receive bufs and send them to the IO Part
- * @netdev: struct netdevice
- * @devdata: visornic_devdata
+/* init_rcv_bufs - initialize receive bufs and send them to the IO Partition
+ * @netdev: struct netdevice.
+ * @devdata: visornic_devdata.
+ *
+ * Allocate rcv buffers and post them to the IO Partition.
*
- * Allocate rcv buffers and post them to the IO Partition.
- * Return 0 for success, and negative for failure.
+ * Return: 0 on success, negative integer on failure.
*/
static int init_rcv_bufs(struct net_device *netdev,
struct visornic_devdata *devdata)
return 0;
}
-/*
- * visornic_enable_with_timeout - send enable to IO Part
- * @netdev: struct net_device
- * @timeout: Time to wait for the ACK from the enable
+/* visornic_enable_with_timeout - send enable to IO Partition
+ * @netdev: struct net_device.
+ * @timeout: Time to wait for the ACK from the enable.
*
- * Sends enable to IOVM, inits, and posts receive buffers to IOVM
- * timeout is defined in msecs (timeout of 0 specifies infinite wait)
- * Return 0 for success, negative for failure.
+ * Sends enable to IOVM, inits, and posts receive buffers to IOVM. Timeout is
+ * defined in msecs (timeout of 0 specifies infinite wait).
+ *
+ * Return: 0 on success, negative integer on failure.
*/
static int visornic_enable_with_timeout(struct net_device *netdev,
const int timeout)
return 0;
}
-/*
- * visornic_timeout_reset - handle xmit timeout resets
- * @work work item that scheduled the work
+/* visornic_timeout_reset - handle xmit timeout resets
+ * @work: Work item that scheduled the work.
*
- * Transmit Timeouts are typically handled by resetting the
- * device for our virtual NIC we will send a Disable and Enable
- * to the IOVM. If it doesn't respond we will trigger a serverdown.
+ * Transmit timeouts are typically handled by resetting the device for our
+ * virtual NIC; we will send a disable and enable to the IOVM. If it doesn't
+ * respond, we will trigger a serverdown.
*/
static void visornic_timeout_reset(struct work_struct *work)
{
rtnl_unlock();
}
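The disable/enable/serverdown flow the comment above describes ties together three functions documented elsewhere in this patch. A hedged sketch of that flow (the devdata field names timeout_reset and netdev, and the VISORNIC_INFINITE_RSP_WAIT constant, are assumptions):

/* Editorial sketch of the xmit-timeout recovery flow; not the hunk's body. */
static void sketch_timeout_reset(struct work_struct *work)
{
        struct visornic_devdata *devdata =
                container_of(work, struct visornic_devdata, timeout_reset);
        struct net_device *netdev = devdata->netdev;

        rtnl_lock();
        if (!netif_running(netdev)) {
                rtnl_unlock();
                return;
        }

        /* Reset the virtual NIC by disabling and re-enabling it at the IOVM;
         * if either step gets no response, treat the IO Partition as down.
         */
        if (visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT) ||
            visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT))
                visornic_serverdown(devdata, NULL);

        rtnl_unlock();
}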
-/*
- * visornic_open - Enable the visornic device and mark the queue started
- * @netdev: netdevice to start
+/* visornic_open - enable the visornic device and mark the queue started
+ * @netdev: netdevice to start.
+ *
+ * Enable the device and start the transmit queue.
*
- * Enable the device and start the transmit queue.
- * Return 0 for success
+ * Return: 0 on success.
*/
static int visornic_open(struct net_device *netdev)
{
return 0;
}
-/*
- * visornic_close - Disables the visornic device and stops the queues
- * @netdev: netdevice to start
+/* visornic_close - disable the visornic device and stop the queues
+ * @netdev: netdevice to stop.
+ *
+ * Disable the device and stop the transmit queue.
*
- * Disable the device and stop the transmit queue.
- * Return 0 for success
+ * Return: 0 on success.
*/
static int visornic_close(struct net_device *netdev)
{
return 0;
}
-/*
- * devdata_xmits_outstanding - compute outstanding xmits
- * @devdata: visornic_devdata for device
+/* devdata_xmits_outstanding - compute outstanding xmits
+ * @devdata: visornic_devdata for device.
*
- * Return value is the number of outstanding xmits.
+ * Return: Long integer representing the number of outstanding xmits.
*/
static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
{
+ devdata->chstat.sent_xmit + 1);
}
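The dangling `+ devdata->chstat.sent_xmit + 1);` context line above is the tail of the wraparound-safe subtraction this comment documents. Roughly, with the counter names sent_xmit/got_xmit_done assumed from the surrounding context:

/* Editorial sketch: unacked xmits = sent - acked, tolerating the sent
 * counter wrapping past ULONG_MAX.
 */
static unsigned long sketch_xmits_outstanding(struct visornic_devdata *devdata)
{
        if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
                return devdata->chstat.sent_xmit -
                       devdata->chstat.got_xmit_done;
        return ULONG_MAX - devdata->chstat.got_xmit_done +
               devdata->chstat.sent_xmit + 1;
}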
-/*
- * vnic_hit_high_watermark
- * @devdata: indicates visornic device we are checking
- * @high_watermark: max num of unacked xmits we will tolerate,
- * before we will start throttling
+/* vnic_hit_high_watermark
+ * @devdata: Indicates visornic device we are checking.
+ * @high_watermark: Max num of unacked xmits we will tolerate before we will
+ * start throttling.
*
- * Returns true iff the number of unacked xmits sent to
- * the IO partition is >= high_watermark.
+ * Return: True iff the number of unacked xmits sent to the IO Partition is >=
+ * high_watermark. False otherwise.
*/
static bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
ulong high_watermark)
return (devdata_xmits_outstanding(devdata) >= high_watermark);
}
-/*
- * vnic_hit_low_watermark
- * @devdata: indicates visornic device we are checking
- * @low_watermark: we will wait until the num of unacked xmits
- * drops to this value or lower before we start
- * transmitting again
+/* vnic_hit_low_watermark
+ * @devdata: Indicates visornic device we are checking.
+ * @low_watermark: We will wait until the num of unacked xmits drops to this
+ * value or lower before we start transmitting again.
*
- * Returns true iff the number of unacked xmits sent to
- * the IO partition is <= low_watermark.
+ * Return: True iff the number of unacked xmits sent to the IO Partition is <=
+ * low_watermark.
*/
static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
ulong low_watermark)
return (devdata_xmits_outstanding(devdata) <= low_watermark);
}
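These two helpers exist so the xmit path can throttle itself against the IO Partition. A minimal sketch of the usual pattern with the stock netdev queue API; the threshold fields named on devdata are assumptions:

/* Editorial sketch: stop the queue at the high watermark (xmit path) and
 * wake it at the low watermark (xmit-completion path).
 */
static void sketch_throttle_on_xmit(struct net_device *netdev,
                                    struct visornic_devdata *devdata)
{
        if (vnic_hit_high_watermark(devdata,
                                    devdata->upper_threshold_net_xmits))
                netif_stop_queue(netdev);
}

static void sketch_unthrottle_on_ack(struct net_device *netdev,
                                     struct visornic_devdata *devdata)
{
        if (netif_queue_stopped(netdev) &&
            vnic_hit_low_watermark(devdata,
                                   devdata->lower_threshold_net_xmits))
                netif_wake_queue(netdev);
}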
-/*
- * visornic_xmit - send a packet to the IO Partition
- * @skb: Packet to be sent
- * @netdev: net device the packet is being sent from
+/* visornic_xmit - send a packet to the IO Partition
+ * @skb: Packet to be sent.
+ * @netdev: Net device the packet is being sent from.
*
- * Convert the skb to a cmdrsp so the IO Partition can understand it.
- * Send the XMIT command to the IO Partition for processing. This
- * function is protected from concurrent calls by a spinlock xmit_lock
- * in the net_device struct, but as soon as the function returns it
- * can be called again.
- * Returns NETDEV_TX_OK.
+ * Convert the skb to a cmdrsp so the IO Partition can understand it, and send
+ * the XMIT command to the IO Partition for processing. This function is
+ * protected from concurrent calls by a spinlock xmit_lock in the net_device
+ * struct. As soon as the function returns, it can be called again.
+ *
+ * Return: NETDEV_TX_OK.
*/
static int visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
return NETDEV_TX_OK;
}
-/*
- * visornic_get_stats - returns net_stats of the visornic device
- * @netdev: netdevice
+/* visornic_get_stats - returns net_stats of the visornic device
+ * @netdev: netdevice.
*
- * Returns the net_device_stats for the device
+ * Return: Pointer to the net_device_stats struct for the device.
*/
static struct net_device_stats *visornic_get_stats(struct net_device *netdev)
{
return &devdata->net_stats;
}
-/*
- * visornic_change_mtu - changes mtu of device.
- * @netdev: netdevice
- * @new_mtu: value of new mtu
+/* visornic_change_mtu - changes mtu of device
+ * @netdev: netdevice.
+ * @new_mtu: Value of new mtu.
*
- * MTU cannot be changed by system, must be changed via
- * CONTROLVM message. All vnics and pnics in a switch have
- * to have the same MTU for everything to work.
- * Currently not supported.
- * Returns EINVAL
+ * The device's MTU cannot be changed by the system; it must be changed via a
+ * CONTROLVM message. All vnics and pnics in a switch have to have the same MTU
+ * for everything to work. Currently not supported.
+ *
+ * Return: -EINVAL.
*/
static int visornic_change_mtu(struct net_device *netdev, int new_mtu)
{
return -EINVAL;
}
-/*
- * visornic_set_multi - changes mtu of device.
- * @netdev: netdevice
+/* visornic_set_multi - set visornic device flags
+ * @netdev: netdevice.
*
- * Only flag we support currently is IFF_PROMISC
- * Returns void
+ * The only flag we currently support is IFF_PROMISC.
*/
static void visornic_set_multi(struct net_device *netdev)
{
devdata->old_flags = netdev->flags;
}
-/*
- * visornic_xmit_timeout - request to timeout the xmit
- * @netdev
+/* visornic_xmit_timeout - request to timeout the xmit
+ * @netdev: netdevice.
*
- * Queue the work and return. Make sure we have not already
- * been informed the IO Partition is gone, if it is gone
- * we will already timeout the xmits.
+ * Queue the work and return. Make sure we have not already been informed that
+ * the IO Partition is gone; if so, we will have already timed out the xmits.
*/
static void visornic_xmit_timeout(struct net_device *netdev)
{
spin_unlock_irqrestore(&devdata->priv_lock, flags);
}
-/*
- * repost_return - repost rcv bufs that have come back
- * @cmdrsp: io channel command struct to post
- * @devdata: visornic devdata for the device
- * @skb: skb
- * @netdev: netdevice
+/* repost_return - repost rcv bufs that have come back
+ * @cmdrsp: IO channel command struct to post.
+ * @devdata: Visornic devdata for the device.
+ * @skb: Socket buffer.
+ * @netdev: netdevice.
*
- * Repost rcv buffers that have been returned to us when
- * we are finished with them.
- * Returns 0 for success, -1 for error.
+ * Repost rcv buffers that have been returned to us when we are finished
+ * with them.
+ *
+ * Return: 0 for success, negative integer on error.
*/
static int repost_return(struct uiscmdrsp *cmdrsp,
struct visornic_devdata *devdata,
return status;
}
-/*
- * visornic_rx - Handle receive packets coming back from IO Part
- * @cmdrsp: Receive packet returned from IO Part
+/* visornic_rx - handle receive packets coming back from IO Partition
+ * @cmdrsp: Receive packet returned from IO Partition.
*
- * Got a receive packet back from the IO Part, handle it and send
- * it up the stack.
- * Returns 1 iff an skb was received, otherwise 0
+ * Got a receive packet back from the IO Partition; handle it and send it up
+ * the stack.
+ *
+ * Return: 1 iff an skb was received, otherwise 0.
*/
static int visornic_rx(struct uiscmdrsp *cmdrsp)
{
return 1;
}
-/*
- * devdata_initialize - Initialize devdata structure
- * @devdata: visornic_devdata structure to initialize
- * #dev: visorbus_deviced it belongs to
+/* devdata_initialize - initialize devdata structure
+ * @devdata: visornic_devdata structure to initialize.
+ * @dev: visorbus_device it belongs to.
+ *
+ * Setup initial values for the visornic, based on channel and default values.
*
- * Setup initial values for the visornic based on channel and default
- * values.
- * Returns a pointer to the devdata structure
+ * Return: A pointer to the devdata structure.
*/
static struct visornic_devdata *devdata_initialize(
struct visornic_devdata *devdata,
return devdata;
}
-/*
- * devdata_release - Frees up references in devdata
- * @devdata: struct to clean up
- *
- * Frees up references in devdata.
- * Returns void
+/* devdata_release - free up references in devdata
+ * @devdata: Struct to clean up.
*/
static void devdata_release(struct visornic_devdata *devdata)
{
.read = info_debugfs_read,
};
-/*
- * send_rcv_posts_if_needed
- * @devdata: visornic device
+/* send_rcv_posts_if_needed - send receive buffers to the IO Partition
+ * @devdata: Visornic device.
*
- * Send receive buffers to the IO Partition.
- * Returns void
+ * Return: 0.
*/
static int send_rcv_posts_if_needed(struct visornic_devdata *devdata)
{
return 0;
}
-/*
- * drain_resp_queue - drains and ignores all messages from the resp queue
- * @cmdrsp: io channel command response message
- * @devdata: visornic device to drain
+/* drain_resp_queue - drains and ignores all messages from the resp queue
+ * @cmdrsp: IO channel command response message.
+ * @devdata: Visornic device to drain.
*/
static void drain_resp_queue(struct uiscmdrsp *cmdrsp,
struct visornic_devdata *devdata)
;
}
-/*
- * service_resp_queue - drains the response queue
- * @cmdrsp: io channel command response message
- * @devdata: visornic device to drain
+/* service_resp_queue - drain the response queue
+ * @cmdrsp: IO channel command response message.
+ * @devdata: Visornic device to drain.
+ * @rx_work_done: Count of receive packets processed so far.
+ * @budget: Max amount of receive work we are allowed to do in this call.
*
- * Drain the response queue of any responses from the IO partition.
- * Process the responses as we get them.
- * Returns when response queue is empty or when the thread stops.
+ * Drain the response queue of any responses from the IO Partition. Process the
+ * responses as we get them.
*/
static void service_resp_queue(struct uiscmdrsp *cmdrsp,
struct visornic_devdata *devdata,
return rx_count;
}
-/*
- * poll_for_irq - Checks the status of the response queue.
- * @v: void pointer to the visronic devdata
+/* poll_for_irq - checks the status of the response queue
+ * @v: Void pointer to the visornic devdata struct.
*
- * Main function of the vnic_incoming thread. Periodically check the
- * response queue and drain it if needed.
- * Returns when thread has stopped.
+ * Main function of the vnic_incoming thread. Periodically check the response
+ * queue and drain it if needed.
*/
static void poll_for_irq(unsigned long v)
{
mod_timer(&devdata->irq_poll_timer, msecs_to_jiffies(2));
}
-/*
- * visornic_probe - probe function for visornic devices
- * @dev: The visor device discovered
+/* visornic_probe - probe function for visornic devices
+ * @dev: The visor device discovered.
+ *
+ * Called when visorbus discovers a visornic device on its bus. It creates a new
+ * visornic ethernet adapter.
*
- * Called when visorbus discovers a visornic device on its
- * bus. It creates a new visornic ethernet adapter.
- * Returns 0 or negative for error.
+ * Return: 0 on success, or negative integer on error.
*/
static int visornic_probe(struct visor_device *dev)
{
return err;
}
-/*
- * host_side_disappeared - IO part is gone.
- * @devdata: device object
+/* host_side_disappeared - IO Partition is gone
+ * @devdata: Device object.
*
- * IO partition servicing this device is gone, do cleanup
- * Returns void.
+ * IO partition servicing this device is gone; do cleanup.
*/
static void host_side_disappeared(struct visornic_devdata *devdata)
{
spin_unlock_irqrestore(&devdata->priv_lock, flags);
}
-/*
- * visornic_remove - Called when visornic dev goes away
- * @dev: visornic device that is being removed
+/* visornic_remove - called when visornic dev goes away
+ * @dev: Visornic device that is being removed.
*
- * Called when DEVICE_DESTROY gets called to remove device.
- * Returns void
+ * Called when DEVICE_DESTROY gets called to remove the device.
*/
static void visornic_remove(struct visor_device *dev)
{
free_netdev(netdev);
}
-/*
- * visornic_pause - Called when IO Part disappears
- * @dev: visornic device that is being serviced
- * @complete_func: call when finished.
+/* visornic_pause - called when IO Partition disappears
+ * @dev: Visornic device that is being serviced.
+ * @complete_func: Call when finished.
*
- * Called when the IO Partition has gone down. Need to free
- * up resources and wait for IO partition to come back. Mark
- * link as down and don't attempt any DMA. When we have freed
- * memory call the complete_func so that Command knows we are
- * done. If we don't call complete_func, IO part will never
- * come back.
- * Returns 0 for success.
+ * Called when the IO Partition has gone down. Need to free up resources and
+ * wait for IO partition to come back. Mark link as down and don't attempt any
+ * DMA. When we have freed memory, call the complete_func so that Command knows
+ * we are done. If we don't call complete_func, the IO Partition will never
+ * come back.
+ *
+ * Return: 0 on success.
*/
static int visornic_pause(struct visor_device *dev,
visorbus_state_complete_func complete_func)
return 0;
}
-/*
- * visornic_resume - Called when IO part has recovered
- * @dev: visornic device that is being serviced
- * @compelte_func: call when finished
+/* visornic_resume - called when IO Partition has recovered
+ * @dev: Visornic device that is being serviced.
+ * @complete_func: Call when finished.
+ *
+ * Called when the IO partition has recovered. Re-establish connection to the IO
+ * Partition and set the link up. Okay to do DMA again.
*
- * Called when the IO partition has recovered. Reestablish
- * connection to the IO part and set the link up. Okay to do
- * DMA again.
- * Returns 0 for success.
+ * Return: 0 on success, negative integer on error.
*/
static int visornic_resume(struct visor_device *dev,
visorbus_state_complete_func complete_func)
.channel_interrupt = NULL,
};
-/*
- * visornic_init - Init function
+/* visornic_init - init function
+ *
+ * Init function for the visornic driver. Do initial driver setup and wait
+ * for devices.
*
- * Init function for the visornic driver. Do initial driver setup
- * and wait for devices.
- * Returns 0 for success, negative for error.
+ * Return: 0 on success, negative integer on error.
*/
static int visornic_init(void)
{
return err;
}
-/*
- * visornic_cleanup - driver exit routine
+/* visornic_cleanup - driver exit routine
*
- * Unregister driver from the bus and free up memory.
+ * Unregister driver from the bus and free up memory.
*/
static void visornic_cleanup(void)
{