for k in self.keys():
self.__delitem__(k)
- def items_lookup_batch(self):
- # batch size is set to the maximum
- batch_size = self.max_entries
- out_batch = ct.c_uint32(0)
- keys = (type(self.Key()) * batch_size)()
- values = (type(self.Leaf()) * batch_size)()
- count = ct.c_uint32(batch_size)
- res = lib.bpf_lookup_batch(self.map_fd,
- None,
- ct.byref(out_batch),
- ct.byref(keys),
- ct.byref(values),
- ct.byref(count)
- )
-
- errcode = ct.get_errno()
- if (errcode == errno.EINVAL):
- raise Exception("BPF_MAP_LOOKUP_BATCH is invalid.")
-
- if (res != 0 and errcode != errno.ENOENT):
- raise Exception("BPF_MAP_LOOKUP_BATCH has failed")
-
- for i in range(0, count.value):
- yield (keys[i], values[i])
-
- def items_delete_batch(self, keys=None):
- """Delete all the key-value pairs in the map if no key are given.
- In that case, it is faster to call lib.bpf_lookup_and_delete_batch than
- create keys list and then call lib.bpf_delete_batch on these keys.
- If the array of keys is given then it deletes the related key-value.
+ def _alloc_keys_values(self, alloc_k=False, alloc_v=False, count=None):
+ """Allocate keys and/or values arrays. Useful in items_*_batch methods.
+
+ Args:
+ alloc_k (bool): True to allocate keys array, False otherwise.
+ Default is False.
+ alloc_v (bool): True to allocate values array, False otherwise.
+ Default is False.
+ count (int): number of elements in the array(s) to allocate. If
+ count is None then it allocates the maximum number of elements, i.e.
+ self.max_entries.
+
+ Returns:
+ tuple: (count, keys, values). Where count is ct.c_uint32,
+ and keys and values are instances of ct.Array
+ Raises:
+ ValueError: If count is less than 1 or greater than
+ self.max_entries.
+ """
+ keys = values = None
+ if not alloc_k and not alloc_v:
+ return (ct.c_uint32(0), None, None)
+
+ if not count: # means alloc maximum size
+ count = self.max_entries
+ elif count < 1 or count > self.max_entries:
+ raise ValueError("Wrong count")
+
+ if alloc_k:
+ keys = (self.Key * count)()
+ if alloc_v:
+ values = (self.Leaf * count)()
+
+ return (ct.c_uint32(count), keys, values)
+
+ def _sanity_check_keys_values(self, keys=None, values=None):
+ """Check if the given keys or values have the right type and size.
+
+ Args:
+ keys (ct.Array): keys array to check
+ values (ct.Array): values array to check
+ Returns:
+ ct.c_uint32 : the size of the array(s)
+ Raises:
+ ValueError: If length of arrays is less than 1 or greater than
+ self.max_entries, or when both arrays length are different.
+ TypeError: If the keys and values are not an instance of ct.Array
"""
- if keys is not None:
- # a ct.Array is expected
- if not isinstance(keys, ct.Array):
- raise TypeError
+ arr_len = 0
+ for elem in [keys, values]:
+ if elem:
+ if not isinstance(elem, ct.Array):
+ raise TypeError
- batch_size = len(keys)
+ arr_len = len(elem)
+ if arr_len < 1 or arr_len > self.max_entries:
+ raise ValueError("Array's length is wrong")
- # check that batch between limits and coherent with the provided values
- if batch_size < 1 or batch_size > self.max_entries:
- raise KeyError
+ if keys and values:
+ # check both length are equal
+ if len(keys) != len(values):
+ raise ValueError("keys array length != values array length")
- count = ct.c_uint32(batch_size)
+ return ct.c_uint32(arr_len)
+ def items_lookup_batch(self):
+ """Look up all the key-value pairs in the map.
+
+ Args:
+ None
+ Yields:
+ tuple: The tuple of (key,value) for every entry that has
+ been looked up.
+ Notes: lookup batch on a keys subset is not supported by the kernel.
+ """
+ for k, v in self._items_lookup_and_optionally_delete_batch(delete=False):
+ yield(k, v)
+ return
+
+ def items_delete_batch(self, ct_keys=None):
+ """Delete the key-value pairs related to the keys given as parameters.
+ Note that if no keys are given, it is faster to call
+ lib.bpf_lookup_and_delete_batch than create keys array and then call
+ lib.bpf_delete_batch on these keys.
+
+ Args:
+ ct_keys (ct.Array): keys array to delete. If an array of keys is
+ given then it deletes all the related key-value pairs.
+ If keys is None (default) then it deletes all entries.
+ Yields:
+ tuple: The tuple of (key,value) for every entry that has
+ been deleted.
+ Raises:
+ Exception: If bpf syscall return value indicates an error.
+ """
+ if ct_keys is not None:
+ ct_cnt = self._sanity_check_keys_values(keys=ct_keys)
res = lib.bpf_delete_batch(self.map_fd,
- ct.byref(keys),
- ct.byref(count)
+ ct.byref(ct_keys),
+ ct.byref(ct_cnt)
)
- errcode = ct.get_errno()
- if (errcode == errno.EINVAL):
- raise Exception("BPF_MAP_DELETE_BATCH is invalid.")
+ if (res != 0):
+ raise Exception("BPF_MAP_DELETE_BATCH has failed: %s"
+ % os.strerror(ct.get_errno()))
- if (res != 0 and errcode != errno.ENOENT):
- raise Exception("BPF_MAP_DELETE_BATCH has failed")
else:
for _ in self.items_lookup_and_delete_batch():
return
- def items_update_batch(self, keys, values):
+ def items_update_batch(self, ct_keys, ct_values):
"""Update all the key-value pairs in the map provided.
- The lists must be the same length, between 1 and the maximum number of entries.
+ The arrays must be the same length, between 1 and the maximum number
+ of entries.
+
+ Args:
+ ct_keys (ct.Array): keys array to update
+ ct_values (ct.Array): values array to update
+ Raises:
+ Exception: If bpf syscall return value indicates an error.
"""
- # two ct.Array are expected
- if not isinstance(keys, ct.Array) or not isinstance(values, ct.Array):
- raise TypeError
-
- batch_size = len(keys)
-
- # check that batch between limits and coherent with the provided values
- if batch_size < 1 or batch_size > self.max_entries or batch_size != len(values):
- raise KeyError
-
- count = ct.c_uint32(batch_size)
+ ct_cnt = self._sanity_check_keys_values(keys=ct_keys, values=ct_values)
res = lib.bpf_update_batch(self.map_fd,
- ct.byref(keys),
- ct.byref(values),
- ct.byref(count)
+ ct.byref(ct_keys),
+ ct.byref(ct_values),
+ ct.byref(ct_cnt)
)
+ if (res != 0):
+ raise Exception("BPF_MAP_UPDATE_BATCH has failed: %s"
+ % os.strerror(ct.get_errno()))
+
+ def items_lookup_and_delete_batch(self):
+ """Look up and delete all the key-value pairs in the map.
+
+ Args:
+ None
+ Yields:
+ tuple: The tuple of (key,value) for every entry that has
+ been looked up and deleted.
+ Notes: lookup and delete batch on a keys subset is not supported by
+ the kernel.
+ """
+ for k, v in self._items_lookup_and_optionally_delete_batch(delete=True):
+ yield(k, v)
+ return
+
+ def _items_lookup_and_optionally_delete_batch(self, delete=True):
+ """Look up and optionally delete all the key-value pairs in the map.
+
+ Args:
+ delete (bool) : look up and delete the key-value pairs when True,
+ else just look up.
+ Yields:
+ tuple: The tuple of (key,value) for every entry that has
+ been looked up and deleted.
+ Raises:
+ Exception: If bpf syscall return value indicates an error.
+ Notes: lookup and delete batch on a keys subset is not supported by
+ the kernel.
+ """
+ if delete is True:
+ bpf_batch = lib.bpf_lookup_and_delete_batch
+ bpf_cmd = "BPF_MAP_LOOKUP_AND_DELETE_BATCH"
+ else:
+ bpf_batch = lib.bpf_lookup_batch
+ bpf_cmd = "BPF_MAP_LOOKUP_BATCH"
+
+ # alloc keys and values to the max size
+ ct_buf_size, ct_keys, ct_values = self._alloc_keys_values(alloc_k=True,
+ alloc_v=True)
+ ct_out_batch = ct_cnt = ct.c_uint32(0)
+ total = 0
+ while True:
+ ct_cnt.value = ct_buf_size.value - total
+ res = bpf_batch(self.map_fd,
+ ct.byref(ct_out_batch) if total else None,
+ ct.byref(ct_out_batch),
+ ct.byref(ct_keys, ct.sizeof(self.Key) * total),
+ ct.byref(ct_values, ct.sizeof(self.Leaf) * total),
+ ct.byref(ct_cnt)
+ )
+ errcode = ct.get_errno()
+ total += ct_cnt.value
+ if (res != 0 and errcode != errno.ENOENT):
+ raise Exception("%s has failed: %s" % (bpf_cmd,
+ os.strerror(errcode)))
- errcode = ct.get_errno()
- if (errcode == errno.EINVAL):
- raise Exception("BPF_MAP_UPDATE_BATCH is invalid.")
+ if res != 0:
+ break # success
- if (res != 0 and errcode != errno.ENOENT):
- raise Exception("BPF_MAP_UPDATE_BATCH has failed")
+ if total == ct_buf_size.value: # buffer full, we can't progress
+ break
- def items_lookup_and_delete_batch(self):
- # batch size is set to the maximum
- batch_size = self.max_entries
- out_batch = ct.c_uint32(0)
- keys = (type(self.Key()) * batch_size)()
- values = (type(self.Leaf()) * batch_size)()
- count = ct.c_uint32(batch_size)
- res = lib.bpf_lookup_and_delete_batch(self.map_fd,
- None,
- ct.byref(out_batch),
- ct.byref(keys),
- ct.byref(values),
- ct.byref(count)
- )
-
- errcode = ct.get_errno()
- if (errcode == errno.EINVAL):
- raise Exception("BPF_MAP_LOOKUP_AND_DELETE_BATCH is invalid.")
-
- if (res != 0 and errcode != errno.ENOENT):
- raise Exception("BPF_MAP_LOOKUP_AND_DELETE_BATCH has failed")
-
- for i in range(0, count.value):
- yield (keys[i], values[i])
+ if ct_cnt.value == 0:
+ # no progress, probably because a concurrent update
+ # puts too many elements in one bucket.
+ break
+
+ for i in range(0, total):
+ yield (ct_keys[i], ct_values[i])
def zero(self):
# Even though this is not very efficient, we grab the entire list of
@skipUnless(kernel_version_ge(5, 6), "requires kernel >= 5.6")
class TestMapBatch(TestCase):
MAPSIZE = 1024
+ SUBSET_SIZE = 32
def fill_hashmap(self):
b = BPF(text=b"""BPF_HASH(map, int, int, %d);""" % self.MAPSIZE)
hmap[ct.c_int(i)] = ct.c_int(i)
return hmap
+ def prepare_keys_subset(self, hmap, count=None):
+ if not count:
+ count = self.SUBSET_SIZE
+ keys = (hmap.Key * count)()
+ i = 0
+ for k, _ in sorted(hmap.items_lookup_batch()):
+ if i < count:
+ keys[i] = k
+ i += 1
+ else:
+ break
+
+ return keys
+
+ def prepare_values_subset(self, hmap, count=None):
+ if not count:
+ count = self.SUBSET_SIZE
+ values = (hmap.Leaf * count)()
+ i = 0
+ for _, v in sorted(hmap.items_lookup_batch()):
+ if i < count:
+ values[i] = v*v
+ i += 1
+ else:
+ break
+ return values
+
def check_hashmap_values(self, it):
i = 0
for k, v in sorted(it):
i += 1
return i
- def test_lookup_and_delete_batch(self):
+ def test_lookup_and_delete_batch_all_keys(self):
# fill the hashmap
hmap = self.fill_hashmap()
self.assertEqual(count, self.MAPSIZE)
# and check the delete has worked, i.e map is now empty
- count = sum(1 for _ in hmap.items_lookup_batch())
+ count = sum(1 for _ in hmap.items())
self.assertEqual(count, 0)
- def test_lookup_batch(self):
+ def test_lookup_batch_all_keys(self):
# fill the hashmap
hmap = self.fill_hashmap()
count = self.check_hashmap_values(hmap.items_lookup_batch())
self.assertEqual(count, self.MAPSIZE)
- def test_delete_batch_all_keysp(self):
+ def test_delete_batch_all_keys(self):
# Delete all key/value in the map
# fill the hashmap
hmap = self.fill_hashmap()
# Delete only a subset of key/value in the map
# fill the hashmap
hmap = self.fill_hashmap()
- # Get 4 keys in this map.
- subset_size = 32
- keys = (hmap.Key * subset_size)()
- i = 0
- for k, _ in hmap.items_lookup_batch():
- if i < subset_size:
- keys[i] = k
- i += 1
- else:
- break
+ keys = self.prepare_keys_subset(hmap)
hmap.items_delete_batch(keys)
# check the delete has worked, i.e map is now empty
count = sum(1 for _ in hmap.items())
- self.assertEqual(count, self.MAPSIZE - subset_size)
+ self.assertEqual(count, self.MAPSIZE - self.SUBSET_SIZE)
- def test_update_batch(self):
+ def test_update_batch_all_keys(self):
hmap = self.fill_hashmap()
# preparing keys and new values arrays
count = sum(v.value for v in hmap.values())
self.assertEqual(count, -1*self.MAPSIZE)
+ def test_update_batch_subset(self):
+ # fill the hashmap
+ hmap = self.fill_hashmap()
+ keys = self.prepare_keys_subset(hmap, count=self.SUBSET_SIZE)
+ new_values = self.prepare_values_subset(hmap, count=self.SUBSET_SIZE)
+
+ hmap.items_update_batch(keys, new_values)
+
+ # check all the values in the map
+ # the first self.SUBSET_SIZE keys follow this rule value = keys * keys
+ # the remaining keys follow this rule: value = keys
+ i = 0
+ for k, v in sorted(hmap.items_lookup_batch()):
+ if i < self.SUBSET_SIZE:
+ self.assertEqual(v, k*k) # values are the square of the keys
+ i += 1
+ else:
+ self.assertEqual(v, k) # values = keys
+
+ self.assertEqual(i, self.SUBSET_SIZE)
+
if __name__ == "__main__":
main()