#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
+#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
 * since every create/destroy spawns the work, only after the necessary time
 * has elapsed will the thread actually query the hardware.
*/
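+/* Keep counters in an rb-tree keyed by counter id, so the periodic query
+ * work can walk them in id order and lookups by id can be O(log n). Ids
+ * are assumed unique here (allocated per counter by the device), so
+ * insertion does not check for duplicates.
+ */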
+static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+{
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*new) {
+		struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
+
+		parent = *new;
+		/* Compare the ids directly: a u32 subtraction cast to int
+		 * can wrap for far-apart ids and invert the ordering.
+		 */
+		if (counter->id < this->id)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&counter->node, parent, new);
+ rb_insert_color(&counter->node, root);
+}
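+
+/* Example (hypothetical, not part of this patch): the sorted tree makes a
+ * lookup by counter id O(log n), which the old linked list could not offer.
+ */
+static struct mlx5_fc *mlx5_fc_stats_lookup(struct rb_root *root, u32 id)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct mlx5_fc *this = container_of(node, struct mlx5_fc, node);
+
+		if (id < this->id)
+			node = node->rb_left;
+		else if (id > this->id)
+			node = node->rb_right;
+		else
+			return this;
+	}
+
+	return NULL;
+}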
+
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
unsigned long now = jiffies;
struct mlx5_fc *counter;
- struct mlx5_fc *tmp;
+ struct rb_node *node;
+ LIST_HEAD(tmplist);
int err = 0;
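+	/* Splice the newly added counters out of addlist under the spinlock;
+	 * the rb-tree itself is only ever touched from this single-threaded
+	 * work, so the insertions below need no locking.
+	 */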
spin_lock(&fc_stats->addlist_lock);
- list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+ list_splice_tail_init(&fc_stats->addlist, &tmplist);
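+	/* Re-arm the periodic work as long as any counter, newly added or
+	 * already tracked, still needs querying.
+	 */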
- if (!list_empty(&fc_stats->list))
+ if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
spin_unlock(&fc_stats->addlist_lock);
- list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
- struct mlx5_fc_cache *c = &counter->cache;
+ list_for_each_entry(counter, &tmplist, list)
+ mlx5_fc_stats_insert(&fc_stats->counters, counter);
+
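+	/* Walk all counters in id order. The successor is fetched before the
+	 * deleted check so that rb_erase() of the current node is safe.
+	 */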
+ node = rb_first(&fc_stats->counters);
+ while (node) {
+ struct mlx5_fc_cache *c;
u64 packets;
u64 bytes;
+ counter = rb_entry(node, struct mlx5_fc, node);
+ c = &counter->cache;
+
+ node = rb_next(node);
+
if (counter->deleted) {
- list_del(&counter->list);
+ rb_erase(&counter->node, &fc_stats->counters);
mlx5_cmd_fc_free(dev, counter->id);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- INIT_LIST_HEAD(&fc_stats->list);
+ fc_stats->counters = RB_ROOT;
INIT_LIST_HEAD(&fc_stats->addlist);
spin_lock_init(&fc_stats->addlist_lock);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct mlx5_fc *counter;
struct mlx5_fc *tmp;
+ struct rb_node *node;
cancel_delayed_work_sync(&dev->priv.fc_stats.work);
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
- list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
-
- list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
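+	/* Counters still sitting on the addlist never made it into the tree;
+	 * free them straight off the list.
+	 */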
+ list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
list_del(&counter->list);
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
+
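+	/* The work is cancelled and the workqueue destroyed above, so the
+	 * tree can be torn down without locking.
+	 */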
+ node = rb_first(&fc_stats->counters);
+ while (node) {
+ counter = rb_entry(node, struct mlx5_fc, node);
+
+ node = rb_next(node);
+
+ rb_erase(&counter->node, &fc_stats->counters);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ }
}
void mlx5_fc_query_cached(struct mlx5_fc *counter,