5 * Copyright (C) 2007-2013 Intel Corporation. All rights reserved.
6 * Copyright (C) 2012-2014 BMW Car IT GmbH.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
31 #include <sys/errno.h>
32 #include <sys/socket.h>
/*
 * Fragment of struct address_info (the struct's opening line is not
 * visible in this chunk).  One entry describes an allocated private-IP
 * block kept on the file-scope allocated_blocks list.
 */
/* How many kernel-reported addresses currently fall inside this block;
 * incremented in __connman_ippool_newaddr(), decremented in
 * __connman_ippool_deladdr(). */
41 unsigned int use_count;
/* Back-pointer to the pool handed out for this block, or NULL if the
 * block was only learned from a kernel address notification. */
42 struct connman_ippool *pool;
/*
 * A leased IP pool handed to a caller of __connman_ippool_create().
 * NOTE(review): several members (index, start/end strings, user_data)
 * are referenced later but their declarations are not visible in this
 * chunk — the struct body shown here is incomplete.
 */
45 struct connman_ippool {
/* Reference count, manipulated atomically via __sync_fetch_and_add/sub
 * in the ref/unref helpers below. */
46 unsigned int refcount;
/* The address block backing this pool. */
48 struct address_info *info;
/* Invoked (with user_data) when a kernel-reported address collides
 * with this pool's block — see __connman_ippool_newaddr(). */
56 ippool_collision_cb_t collision_cb;
/* List of struct address_info — every private /24 block currently in
 * use, whether created by __connman_ippool_create() or learned from a
 * kernel address notification. */
60 GSList *allocated_blocks;
/* Where get_free_block() last stopped; the search resumes here and
 * terminates when it wraps back around to this value. */
62 static uint32_t last_block;
/* Base addresses of the three RFC 1918 private ranges, precomputed in
 * host byte order by __connman_ippool_init():
 * 192.168.0.0, 172.16.0.0 and 10.0.0.0 respectively. */
63 static uint32_t block_16_bits;
64 static uint32_t block_20_bits;
65 static uint32_t block_24_bits;
/* 255.255.255.0 in host byte order — every pool is handed out as a /24. */
66 static uint32_t subnet_mask_24;
/*
 * Take a reference on @pool, logging the caller's file/line/function.
 * Atomically increments pool->refcount; the return statement is not
 * visible in this chunk, but the signature indicates the pool is
 * returned to the caller.
 */
68 struct connman_ippool *
69 __connman_ippool_ref_debug(struct connman_ippool *pool,
70 const char *file, int line, const char *caller)
/* Log the post-increment count (refcount + 1 is read non-atomically;
 * debug output only). */
72 DBG("%p ref %d by %s:%d:%s()", pool, pool->refcount + 1,
75 __sync_fetch_and_add(&pool->refcount, 1);
/*
 * Drop a reference on @pool, logging the caller.  When the last
 * reference goes away, the backing address block is removed from
 * allocated_blocks and the pool's address strings are released.
 */
80 void __connman_ippool_unref_debug(struct connman_ippool *pool,
81 const char *file, int line, const char *caller)
86 DBG("%p ref %d by %s:%d:%s()", pool, pool->refcount - 1,
/* __sync_fetch_and_sub returns the PRE-decrement value: anything but 1
 * means other references remain, so bail out (the early return on the
 * elided line is not visible here). */
89 if (__sync_fetch_and_sub(&pool->refcount, 1) != 1)
/* Last reference gone: forget the block... */
93 allocated_blocks = g_slist_remove(allocated_blocks, pool->info);
/* ...and free the strings allocated by get_ip() in
 * __connman_ippool_create().  NOTE(review): the g_free of pool->info
 * and of pool itself is not visible in this chunk — presumably on the
 * elided lines; verify against the full file. */
97 g_free(pool->gateway);
98 g_free(pool->broadcast);
99 g_free(pool->start_ip);
100 g_free(pool->end_ip);
101 g_free(pool->subnet_mask);
/*
 * Convert a host-byte-order IPv4 address to a newly allocated
 * dotted-quad string.  Caller owns the result and must g_free() it.
 * (The local struct in_addr declaration sits on an elided line.)
 */
106 static char *get_ip(uint32_t ip)
110 addr.s_addr = htonl(ip);
/* inet_ntoa() returns a static buffer, so duplicate it before return. */
112 return g_strdup(inet_ntoa(addr));
/*
 * Advance @block to the next /24-sized candidate block, cycling through
 * the three RFC 1918 private ranges.  NOTE(review): several guard lines
 * (e.g. the overflow checks that trigger the range switches below) are
 * not visible in this chunk.
 */
115 static uint32_t next_block(uint32_t block)
120 * Return the next IP block within the private IP range
122 * 16-bit block 192.168.0.0 – 192.168.255.255
123 * 20-bit block 172.16.0.0 – 172.31.255.255
124 * 24-bit block 10.0.0.0 – 10.255.255.255
/* Third octet of the current block — incremented on the elided lines. */
127 next = (block & 0x0000ff00) >> 8;
131 if ((block & 0xffff0000) == block_16_bits) {
133 * Reached the end of the 16-bit block, switch
134 * to the 20-bit block.
136 return block_20_bits;
139 if ((block & 0xffff0000) >= block_20_bits) {
/* Within 172.16/12: step the second octet, staying inside 16..31. */
140 next = (block & 0x00ff0000) >> 16;
141 if (next >= 16 && next < 32)
146 * Reached the end of the 20-bit
147 * block, switch to the 24-bit block.
149 return block_24_bits;
152 return (block & 0xff000000) |
153 ((next << 16) & 0x00ff0000);
156 if ((block & 0xff000000) == block_24_bits) {
/* Within 10/8: step the second octet, wrapping to 192.168/16 when the
 * range is exhausted. */
157 next = (block & 0x00ff0000) >> 16;
163 * Reached the end of the 24-bit
164 * block, switch to the 16-bit block.
166 return block_16_bits;
169 return (block & 0xff000000) |
170 ((next << 16) & 0x00ff0000);
/* Default: still inside 192.168/16 — bump the third octet. */
174 return (block & 0xffff0000) | ((next << 8) & 0x0000ff00);
/*
 * Find a private /24 block not overlapping any entry in
 * allocated_blocks.  NOTE(review): the @size parameter is not used on
 * any line visible in this chunk — confirm its role against the full
 * file.  Return-value convention (0 on failure?) is also on elided
 * lines.
 */
177 static uint32_t get_free_block(unsigned int size)
179 struct address_info *info;
185 * Instead of always starting from the 16-bit block, we start
186 * from the last assigned block. This is a simple optimization
187 * for the case where many blocks have already been assigned,
188 * e.g. the first half of the private IP pool is in use and we
189 * need to find a new block.
191 * The only thing we have to make sure of is that we terminate
192 * if there is no block left.
/* First call: last_block is still 0, so begin at 192.168.0.0. */
197 block = block_16_bits;
/* Check the candidate against every allocated block. */
201 for (list = allocated_blocks; list; list = list->next) {
204 if (info->start <= block && block <= info->end) {
/* Collision — advance and retry until we wrap around to where we
 * started, which means the address space is exhausted. */
213 block = next_block(block);
214 } while (block != last_block);
/*
 * Linear search of allocated_blocks for the entry matching both the
 * interface @index and the block @start address; the NULL-on-miss
 * return is on an elided line.
 */
219 static struct address_info *lookup_info(int index, uint32_t start)
223 for (list = allocated_blocks; list; list = list->next) {
224 struct address_info *info = list->data;
226 if (info->index == index && info->start == start)
/*
 * True iff @address (host byte order) lies in one of the RFC 1918
 * private ranges: 10/8, 192.168/16 or 172.16/12.
 */
233 static bool is_private_address(uint32_t address)
/* a = first octet, b = second octet. */
237 a = (address & 0xff000000) >> 24;
238 b = (address & 0x00ff0000) >> 16;
240 if (a == 10 || (a == 192 && b == 168) ||
241 (a == 172 && (b >= 16 && b <= 31)))
/*
 * Called when the kernel reports a new address on interface @index.
 * Non-private addresses are ignored; private ones are rounded down to
 * their block start and registered in allocated_blocks so that
 * get_free_block() avoids them.  If the block was already in use, any
 * pool overlapping it is notified through its collision callback.
 */
247 void __connman_ippool_newaddr(int index, const char *address,
248 unsigned char prefixlen)
250 struct address_info *info, *it;
252 uint32_t start, end, mask;
/* Reject unparsable addresses. */
255 if (inet_aton(address, &inp) == 0)
258 start = ntohl(inp.s_addr);
259 if (!is_private_address(start))
/* NOTE(review): a right shift by 32 when prefixlen == 0 is undefined
 * behavior in C — presumably prefixlen is never 0 here; verify. */
265 mask = ~(0xffffffff >> prefixlen);
/* Round down to the start of the containing block. */
267 start = start & mask;
270 info = lookup_info(index, start);
/* Not yet tracked: create and register a new entry (the field
 * assignments sit on elided lines). */
274 info = g_try_new0(struct address_info, 1);
282 allocated_blocks = g_slist_prepend(allocated_blocks, info);
285 info->use_count = info->use_count + 1;
/* Repeated use or an existing pool on this block: look for pools whose
 * range contains this block start and warn them of the collision. */
287 if (info->use_count > 1 || info->pool) {
289 * We only need to check the first IP in a block for
295 for (list = allocated_blocks; list; list = list->next) {
/* Skip entries whose [start, end] range does not contain our start. */
301 if (!(info->start >= it->start && info->start <= it->end))
304 if (it->pool && it->pool->collision_cb)
305 it->pool->collision_cb(it->pool, it->pool->user_data);
/*
 * Called when the kernel reports an address removed from interface
 * @index — the inverse of __connman_ippool_newaddr().  Drops one use
 * count from the matching block and forgets the block entirely when it
 * reaches zero.
 */
311 void __connman_ippool_deladdr(int index, const char *address,
312 unsigned char prefixlen)
314 struct address_info *info;
316 uint32_t start, mask;
318 if (inet_aton(address, &inp) == 0)
321 start = ntohl(inp.s_addr);
322 if (!is_private_address(start))
/* Same block-start rounding as in newaddr (and the same NOTE: shift by
 * 32 if prefixlen == 0 is undefined behavior). */
325 mask = ~(0xffffffff >> prefixlen);
326 start = start & mask;
328 info = lookup_info(index, start);
/* In theory this should never happen: every deladdr should be paired
 * with a prior newaddr that created the entry. */
330 /* In theory this should never happen */
331 connman_error("Inconsistent IP pool management (start not found)");
335 info->use_count = info->use_count - 1;
/* Other addresses in this block are still active — keep the entry. */
339 if (info->use_count > 0)
/* NOTE(review): the entry is unlinked but no g_free(info) is visible in
 * this chunk — confirm it is freed on an elided line. */
342 allocated_blocks = g_slist_remove(allocated_blocks, info);
/*
 * Allocate a fresh private /24 pool for interface @index.  @start and
 * @range (parameter lines partly elided) pick the usable slice inside
 * the block; @collision_cb is fired if the kernel later assigns a
 * conflicting address.  Returns NULL on exhaustion or OOM (the NULL
 * returns sit on elided lines).
 */
346 struct connman_ippool *__connman_ippool_create(int index,
349 ippool_collision_cb_t collision_cb,
352 struct connman_ippool *pool;
353 struct address_info *info;
359 * The range is at most 255 and we don't support overlapping
/* Reject slices that would spill past host .254 of the /24. */
362 if (start + range > 254) {
363 connman_error("IP pool does not support pool size larger than 254");
/* NOTE(review): the whole start+range span is passed as the block
 * "size"; get_free_block()'s visible lines never read it — verify. */
367 block = get_free_block(start + range);
369 connman_warn("Could not find a free IP block");
373 pool = g_try_new0(struct connman_ippool, 1);
377 info = g_try_new0(struct address_info, 1);
387 info->end = block + range;
391 pool->collision_cb = collision_cb;
392 pool->user_data = user_data;
/* Conventional /24 layout: gateway at .1, broadcast at .255, and the
 * caller's [start, start+range] slice for leases.  All strings are
 * g_strdup'd by get_ip() and freed on final unref. */
399 pool->gateway = get_ip(info->start + 1);
400 pool->broadcast = get_ip(info->start + 255);
401 pool->subnet_mask = get_ip(subnet_mask_24);
402 pool->start_ip = get_ip(block + start);
403 pool->end_ip = get_ip(block + start + range);
/* Make the block visible to collision checks and future allocations. */
405 allocated_blocks = g_slist_prepend(allocated_blocks, info);
/*
 * Trivial accessors.  All returned strings remain owned by the pool;
 * callers must not free them and must not use them after the last
 * pool unref.
 */
410 const char *__connman_ippool_get_gateway(struct connman_ippool *pool)
412 return pool->gateway;
415 const char *__connman_ippool_get_broadcast(struct connman_ippool *pool)
417 return pool->broadcast;
420 const char *__connman_ippool_get_start_ip(struct connman_ippool *pool)
422 return pool->start_ip;
/* (The return statement of the end-ip getter is on an elided line.) */
425 const char *__connman_ippool_get_end_ip(struct connman_ippool *pool)
430 const char *__connman_ippool_get_subnet_mask(struct connman_ippool *pool)
432 return pool->subnet_mask;
/*
 * One-time module setup: precompute the RFC 1918 range bases and the
 * /24 netmask in host byte order for use by the allocator above.
 * (The return statement is on an elided line.)
 */
435 int __connman_ippool_init(void)
439 block_16_bits = ntohl(inet_addr("192.168.0.0"));
440 block_20_bits = ntohl(inet_addr("172.16.0.0"));
441 block_24_bits = ntohl(inet_addr("10.0.0.0"));
442 subnet_mask_24 = ntohl(inet_addr("255.255.255.0"));
/*
 * Module teardown: release every remaining address_info entry (they
 * are plain g_try_new0 allocations, so g_free suffices) and reset the
 * list head so a later init starts clean.
 */
447 void __connman_ippool_cleanup(void)
451 g_slist_free_full(allocated_blocks, g_free);
453 allocated_blocks = NULL;