/* SPDX-License-Identifier: LGPL-2.1-only */
/*
 * Copyright (c) 2003-2012 Thomas Graf <[email protected]>
 */

/**
 * @ingroup cache_mngt
 * @defgroup cache_mngr Manager
 * @brief Manager keeping caches up to date automatically.
 *
 * The cache manager keeps caches up to date automatically by listening to
 * netlink notifications and integrating the received information into the
 * existing cache.
 *
 * @note This functionality is still considered experimental.
 *
 * Related sections in the development guide:
 * - @core_doc{_cache_manager,Cache Manager}
 *
 * @{
 *
 * Header
 * ------
 * ~~~~{.c}
 * #include <netlink/cache.h>
 * ~~~~
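 *
 * Example
 * -------
 * A minimal usage sketch (illustrative only; it assumes the NETLINK_ROUTE
 * protocol and the "route/link" cache type and omits error handling):
 * ~~~~{.c}
 * struct nl_cache_mngr *mngr;
 * struct nl_cache *cache;
 *
 * // Let the manager allocate and connect its own event socket
 * nl_cache_mngr_alloc(NULL, NETLINK_ROUTE, NL_AUTO_PROVIDE, &mngr);
 *
 * // Fill the cache with an initial dump and subscribe to updates
 * nl_cache_mngr_add(mngr, "route/link", NULL, NULL, &cache);
 *
 * for (;;) {
 *         // Wait up to 1000ms for notifications and merge them into the cache
 *         nl_cache_mngr_poll(mngr, 1000);
 * }
 *
 * nl_cache_mngr_free(mngr);
 * ~~~~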
 */

#include "nl-default.h"

#include <netlink/netlink.h>
#include <netlink/cache.h>
#include <netlink/utils.h>

#include "nl-core.h"
#include "nl-priv-dynamic-core/nl-core.h"
#include "nl-priv-dynamic-core/cache-api.h"
#include "nl-aux-core/nl-core.h"

#define NL_ALLOCATED_SYNC_SOCK 4

/** @cond SKIP */
struct nl_cache_mngr
{
        int                     cm_protocol;
        int                     cm_flags;
        int                     cm_nassocs;
        struct nl_sock *        cm_sock;
        struct nl_sock *        cm_sync_sock;
        struct nl_cache_assoc * cm_assocs;
};

#define NASSOC_INIT             16
#define NASSOC_EXPAND           8
/** @endcond */

/*
 * Parser callback used while processing notifications: runs the cache
 * operations' event filter (if any) and merges the parsed object into the
 * cache belonging to the association passed via the parser parameters.
 */
static int include_cb(struct nl_object *obj, struct nl_parser_param *p)
{
        struct nl_cache_assoc *ca = p->pp_arg;
        struct nl_cache_ops *ops = ca->ca_cache->c_ops;

        NL_DBG(2, "Including object %p into cache %p\n", obj, ca->ca_cache);

        if (NL_DEBUG && nl_debug >= 4)
                nl_object_dump(obj, &nl_debug_dp);

        if (ops->co_event_filter)
                if (ops->co_event_filter(ca->ca_cache, obj) != NL_OK)
                        return 0;

        if (ops->co_include_event)
                return ops->co_include_event(ca->ca_cache, obj, ca->ca_change,
                                             ca->ca_change_v2,
                                             ca->ca_change_data);
        else {
                if (ca->ca_change_v2)
                        return nl_cache_include_v2(ca->ca_cache, obj, ca->ca_change_v2, ca->ca_change_data);
                else
                        return nl_cache_include(ca->ca_cache, obj, ca->ca_change, ca->ca_change_data);
        }
}

/*
 * Receive callback installed on the manager's event socket: looks up the
 * cache registered for the message type of the incoming notification and
 * feeds the message to the cache parser.
 */
static int event_input(struct nl_msg *msg, void *arg)
{
        struct nl_cache_mngr *mngr = arg;
        int protocol = nlmsg_get_proto(msg);
        int type = nlmsg_hdr(msg)->nlmsg_type;
        struct nl_cache_ops *ops;
        int i, n;
        struct nl_parser_param p = {
                .pp_cb = include_cb,
        };

        NL_DBG(2, "Cache manager %p, handling new message %p as event\n",
               mngr, msg);

        if (NL_DEBUG && nl_debug >= 4)
                nl_msg_dump(msg, stderr);

        if (mngr->cm_protocol != protocol)
                BUG();

        for (i = 0; i < mngr->cm_nassocs; i++) {
                if (mngr->cm_assocs[i].ca_cache) {
                        ops = mngr->cm_assocs[i].ca_cache->c_ops;
                        for (n = 0; ops->co_msgtypes[n].mt_id >= 0; n++)
                                if (ops->co_msgtypes[n].mt_id == type)
                                        goto found;
                }
        }

        return NL_SKIP;

found:
        NL_DBG(2, "Associated message %p to cache %p\n",
               msg, mngr->cm_assocs[i].ca_cache);
        p.pp_arg = &mngr->cm_assocs[i];

        return nl_cache_parse(ops, NULL, nlmsg_hdr(msg), &p);
}

/**
 * Allocate new cache manager
 * @arg sk              Netlink socket or NULL to auto allocate
 * @arg protocol        Netlink protocol this manager is used for
 * @arg flags           Flags (\c NL_AUTO_PROVIDE)
 * @arg result          Result pointer
 *
 * Allocates a new cache manager for the specified netlink protocol.
 *
 * 1. If sk is not specified (\c NULL) a netlink socket matching the
 *    specified protocol will be automatically allocated.
 *
 * 2. The socket will be put in non-blocking mode and sequence checking
 *    will be disabled regardless of whether the socket was provided by
 *    the caller or automatically allocated.
 *
 * 3. The socket will be connected.
 *
 * If the flag \c NL_AUTO_PROVIDE is specified, any cache added to the
 * manager will automatically be made available to other users using
 * nl_cache_mngt_provide().
 *
 * @note If the socket is provided by the caller, it is NOT recommended
 *       to use the socket for anything else besides receiving netlink
 *       notifications.
 *
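 * A minimal sketch of allocating a manager with a caller-provided socket
 * (illustrative only; it assumes NETLINK_ROUTE and omits error handling).
 * The caller keeps ownership of \c sk:
 * ~~~~{.c}
 * struct nl_sock *sk = nl_socket_alloc();
 * struct nl_cache_mngr *mngr;
 *
 * // The manager connects the socket, disables sequence checking and
 * // switches it to non-blocking mode.
 * nl_cache_mngr_alloc(sk, NETLINK_ROUTE, NL_AUTO_PROVIDE, &mngr);
 * ~~~~
 *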
 * @return 0 on success or a negative error code.
 */
int nl_cache_mngr_alloc(struct nl_sock *sk, int protocol, int flags,
                        struct nl_cache_mngr **result)
{
        return nl_cache_mngr_alloc_ex(sk, NULL, protocol, flags, result);
}

/**
 * Allocate new cache manager, with a custom refill (sync) socket
 * @arg sk              Netlink socket or NULL to auto allocate
 * @arg sync_sk         Blocking netlink socket used for cache refills,
 *                      or NULL to auto allocate
 * @arg protocol        Netlink protocol this manager is used for
 * @arg flags           Flags (\c NL_AUTO_PROVIDE)
 * @arg result          Result pointer
 *
 * Same as nl_cache_mngr_alloc(), but uses the given socket for the
 * initial cache fills and later refills.
 *
 * @note Ownership of \c sync_sk passes to the cache manager.
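 *
 * A minimal sketch (illustrative only; it assumes NETLINK_ROUTE, no flags
 * and no error handling). As noted above, ownership of \c sync_sk passes
 * to the manager:
 * ~~~~{.c}
 * struct nl_sock *sync_sk = nl_socket_alloc();
 * struct nl_cache_mngr *mngr;
 *
 * // The event socket is auto-allocated; cache refills use sync_sk
 * nl_cache_mngr_alloc_ex(NULL, sync_sk, NETLINK_ROUTE, 0, &mngr);
 * ~~~~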
 */
int nl_cache_mngr_alloc_ex(struct nl_sock *sk, struct nl_sock *sync_sk, int protocol, int flags,
                           struct nl_cache_mngr **result)
{
        _nl_auto_nl_cache_mngr struct nl_cache_mngr *mngr = NULL;
        int err;

        /* Catch abuse of flags */
        if (flags & NL_ALLOCATED_SOCK)
                BUG();
        flags = flags & NL_AUTO_PROVIDE;

        mngr = calloc(1, sizeof(*mngr));
        if (!mngr)
                return -NLE_NOMEM;

        mngr->cm_flags = flags;

        if (!sk) {
                if (!(sk = nl_socket_alloc()))
                        return -NLE_NOMEM;
                mngr->cm_flags |= NL_ALLOCATED_SOCK;
        }
        mngr->cm_sock = sk;

        if (!sync_sk) {
                if (!(sync_sk = nl_socket_alloc()))
                        return -NLE_NOMEM;
                mngr->cm_flags |= NL_ALLOCATED_SYNC_SOCK;
        }
        mngr->cm_sync_sock = sync_sk;

        mngr->cm_nassocs = NASSOC_INIT;
        mngr->cm_protocol = protocol;
        mngr->cm_assocs = calloc(mngr->cm_nassocs,
                                 sizeof(struct nl_cache_assoc));
        if (!mngr->cm_assocs)
                return -NLE_NOMEM;

        /* Required to receive async event notifications */
        nl_socket_disable_seq_check(mngr->cm_sock);

        if ((err = nl_connect(mngr->cm_sock, protocol)) < 0)
                return err;

        if ((err = nl_socket_set_nonblocking(mngr->cm_sock)) < 0)
                return err;

        if ((err = nl_connect(mngr->cm_sync_sock, protocol)) < 0)
                return err;

        NL_DBG(1, "Allocated cache manager %p, protocol %d, %d caches\n",
               mngr, protocol, mngr->cm_nassocs);

        *result = _nl_steal_pointer(&mngr);
        return 0;
}

/**
 * Set change_func_v2 for cache manager
 * @arg mngr            Cache manager.
 * @arg cache           Cache associated with the callback
 * @arg cb              Function to be called upon changes.
 * @arg data            Argument passed on to change callback
 *
 * Adds the change_func_v2 callback to a cache registered with the manager.
 * Like the standard change_func, this callback reports added or removed
 * netlink objects. In the case of a change, both the old and the new object
 * are provided, along with the corresponding diff. If this callback is
 * registered, it takes priority over the change_func registered when the
 * cache was added; only one of the two callbacks is executed.
 *
 * The first netlink object passed to the callback refers to the old object
 * and the second to the new one. On NL_ACT_CHANGE the first is the previous
 * object in the cache and the second the updated version. On NL_ACT_DEL the
 * first is the deleted object and the second is NULL. On NL_ACT_NEW the
 * first is NULL and the second the new netlink object.
 *
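 * A sketch of such a callback (illustrative only; it assumes the
 * change_func_v2_t prototype declared in <netlink/cache.h> and is
 * registered via nl_cache_mngr_add_cache_v2()):
 * ~~~~{.c}
 * static void change_cb_v2(struct nl_cache *cache,
 *                          struct nl_object *old_obj, struct nl_object *new_obj,
 *                          uint64_t diff, int action, void *data)
 * {
 *         switch (action) {
 *         case NL_ACT_NEW:
 *                 // old_obj is NULL, new_obj is the added object
 *                 break;
 *         case NL_ACT_CHANGE:
 *                 // old_obj is the previous version, new_obj the updated one
 *                 break;
 *         case NL_ACT_DEL:
 *                 // old_obj is the removed object, new_obj is NULL
 *                 break;
 *         }
 * }
 * ~~~~
 *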
 * The user is responsible for calling nl_cache_mngr_poll() or for
 * monitoring the socket and calling nl_cache_mngr_data_ready() to allow
 * the library to process netlink notification events.
 *
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 *                             cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_RANGE Cache is not managed by this cache manager
 */
static int nl_cache_mngr_set_change_func_v2(struct nl_cache_mngr *mngr,
                                            struct nl_cache *cache,
                                            change_func_v2_t cb, void *data)
{
        struct nl_cache_ops *ops;
        int i;

        ops = cache->c_ops;
        if (!ops)
                return -NLE_INVAL;

        if (ops->co_protocol != mngr->cm_protocol)
                return -NLE_PROTO_MISMATCH;

        if (ops->co_groups == NULL)
                return -NLE_OPNOTSUPP;

        for (i = 0; i < mngr->cm_nassocs; i++)
                if (mngr->cm_assocs[i].ca_cache == cache)
                        break;

        if (i >= mngr->cm_nassocs)
                return -NLE_RANGE;

        mngr->cm_assocs[i].ca_change_v2 = cb;
        mngr->cm_assocs[i].ca_change_data = data;

        return 0;
}

/**
 * Add cache to cache manager
 * @arg mngr            Cache manager.
 * @arg cache           Cache to be added to cache manager
 * @arg cb              Function to be called upon changes.
 * @arg data            Argument passed on to change callback
 *
 * Adds cache to the manager. The operation will trigger a full
 * dump request from the kernel to initially fill the contents
 * of the cache. The manager will subscribe to the notification group
 * of the cache and keep track of any further changes.
 *
 * The user is responsible for calling nl_cache_mngr_poll() or for
 * monitoring the socket and calling nl_cache_mngr_data_ready() to allow
 * the library to process netlink notification events.
 *
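 * A minimal sketch (illustrative only; it assumes the "route/link" cache
 * type, a change_func_t callback named link_change_cb and omits error
 * handling):
 * ~~~~{.c}
 * struct nl_cache *cache;
 *
 * nl_cache_alloc_name("route/link", &cache);
 * nl_cache_mngr_add_cache(mngr, cache, link_change_cb, NULL);
 * ~~~~
 *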
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 *                             cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_EXIST Cache of this type already being managed
 */
int nl_cache_mngr_add_cache(struct nl_cache_mngr *mngr, struct nl_cache *cache,
                            change_func_t cb, void *data)
{
        struct nl_cache_ops *ops;
        struct nl_af_group *grp;
        int err, i;

        ops = cache->c_ops;
        if (!ops)
                return -NLE_INVAL;

        if (ops->co_protocol != mngr->cm_protocol)
                return -NLE_PROTO_MISMATCH;

        if (ops->co_groups == NULL)
                return -NLE_OPNOTSUPP;

        for (i = 0; i < mngr->cm_nassocs; i++)
                if (mngr->cm_assocs[i].ca_cache &&
                    mngr->cm_assocs[i].ca_cache->c_ops == ops)
                        return -NLE_EXIST;

        for (i = 0; i < mngr->cm_nassocs; i++)
                if (!mngr->cm_assocs[i].ca_cache)
                        break;

        if (i >= mngr->cm_nassocs) {
                struct nl_cache_assoc *cm_assocs;
                int cm_nassocs = mngr->cm_nassocs + NASSOC_EXPAND;

                cm_assocs = realloc(mngr->cm_assocs,
                                    cm_nassocs * sizeof(struct nl_cache_assoc));
                if (cm_assocs == NULL)
                        return -NLE_NOMEM;

                memset(cm_assocs + mngr->cm_nassocs, 0,
                       NASSOC_EXPAND * sizeof(struct nl_cache_assoc));
                mngr->cm_assocs = cm_assocs;
                mngr->cm_nassocs = cm_nassocs;

                NL_DBG(1, "Increased capacity of cache manager %p " \
                          "to %d\n", mngr, mngr->cm_nassocs);
        }

        for (grp = ops->co_groups; grp->ag_group; grp++) {
                err = nl_socket_add_membership(mngr->cm_sock, grp->ag_group);
                if (err < 0)
                        return err;
        }

        err = nl_cache_refill(mngr->cm_sync_sock, cache);
        if (err < 0)
                goto errout_drop_membership;

        mngr->cm_assocs[i].ca_cache = cache;
        mngr->cm_assocs[i].ca_change = cb;
        mngr->cm_assocs[i].ca_change_data = data;

        if (mngr->cm_flags & NL_AUTO_PROVIDE)
                nl_cache_mngt_provide(cache);

        NL_DBG(1, "Added cache %p <%s> to cache manager %p\n",
               cache, nl_cache_name(cache), mngr);

        return 0;

errout_drop_membership:
        for (grp = ops->co_groups; grp->ag_group; grp++)
                nl_socket_drop_membership(mngr->cm_sock, grp->ag_group);

        return err;
}

/**
 * Add cache to cache manager, using a v2 change callback
 * @arg mngr            Cache manager.
 * @arg cache           Cache to be added to cache manager
 * @arg cb              V2 function to be called upon changes.
 * @arg data            Argument passed on to change callback
 *
 * Adds cache to the manager. The operation will trigger a full
 * dump request from the kernel to initially fill the contents
 * of the cache. The manager will subscribe to the notification group
 * of the cache and keep track of any further changes.
 *
 * The user is responsible for calling nl_cache_mngr_poll() or for
 * monitoring the socket and calling nl_cache_mngr_data_ready() to allow
 * the library to process netlink notification events.
 *
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 *                             cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_EXIST Cache of this type already being managed
 */
int nl_cache_mngr_add_cache_v2(struct nl_cache_mngr *mngr, struct nl_cache *cache,
                               change_func_v2_t cb, void *data)
{
        int err;

        err = nl_cache_mngr_add_cache(mngr, cache, NULL, NULL);
        if (err < 0)
                return err;

        return nl_cache_mngr_set_change_func_v2(mngr, cache, cb, data);
}

/**
 * Add cache to cache manager
 * @arg mngr            Cache manager.
 * @arg name            Name of cache to keep track of
 * @arg cb              Function to be called upon changes.
 * @arg data            Argument passed on to change callback
 * @arg result          Pointer to store added cache (optional)
 *
 * Allocates a new cache of the specified type and adds it to the manager.
 * The operation will trigger a full dump request from the kernel to
 * initially fill the contents of the cache. The manager will subscribe
 * to the notification group of the cache and keep track of any further
 * changes.
 *
 * The user is responsible for calling nl_cache_mngr_poll() or for
 * monitoring the socket and calling nl_cache_mngr_data_ready() to allow
 * the library to process netlink notification events.
 *
 * @note Versions up to 3.4.0 required the result argument, preventing
 *       NULL from being passed.
 *
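 * A minimal sketch (illustrative only; it assumes the "route/link" cache
 * type, a change_func_t callback named link_change_cb and omits error
 * handling). Unlike nl_cache_mngr_add_cache(), the cache is allocated
 * internally by name:
 * ~~~~{.c}
 * struct nl_cache *cache;
 *
 * nl_cache_mngr_add(mngr, "route/link", link_change_cb, NULL, &cache);
 * ~~~~
 *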
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_NOCACHE Unknown cache type
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 *                             cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_EXIST Cache of this type already being managed
 */
int nl_cache_mngr_add(struct nl_cache_mngr *mngr, const char *name,
                      change_func_t cb, void *data, struct nl_cache **result)
{
        struct nl_cache_ops *ops;
        struct nl_cache *cache;
        int err;

        ops = nl_cache_ops_lookup_safe(name);
        if (!ops)
                return -NLE_NOCACHE;

        cache = nl_cache_alloc(ops);
        nl_cache_ops_put(ops);
        if (!cache)
                return -NLE_NOMEM;

        err = nl_cache_mngr_add_cache(mngr, cache, cb, data);
        if (err < 0)
                goto errout_free_cache;

        if (result)
                *result = cache;
        return 0;

errout_free_cache:
        nl_cache_free(cache);

        return err;
}

/**
 * Get socket file descriptor
 * @arg mngr            Cache Manager
 *
 * Get the file descriptor of the socket associated with the manager.
 *
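 * This allows the socket to be integrated into an existing event loop. A
 * sketch using epoll (illustrative only; error handling omitted):
 * ~~~~{.c}
 * struct epoll_event ev = { .events = EPOLLIN };
 * int efd = epoll_create1(0);
 *
 * ev.data.ptr = mngr;
 * epoll_ctl(efd, EPOLL_CTL_ADD, nl_cache_mngr_get_fd(mngr), &ev);
 * // When the fd becomes readable, call nl_cache_mngr_data_ready(mngr)
 * ~~~~
 *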
 * @note Do not use the socket for anything besides receiving
 *       notifications.
 */
int nl_cache_mngr_get_fd(struct nl_cache_mngr *mngr)
{
        return nl_socket_get_fd(mngr->cm_sock);
}

/**
 * Check for event notifications
 * @arg mngr            Cache Manager
 * @arg timeout         Upper limit of time poll() may block, in milliseconds.
 *
 * Causes poll() to be called to check for new event notifications
 * being available. Calls nl_cache_mngr_data_ready() to process
 * available data.
 *
 * This function is ideally called regularly during an idle
 * period.
 *
 * A timeout can be specified in milliseconds to limit the time the
 * function will wait for updates.
 *
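 * A minimal sketch (illustrative only; the timeout is handed to poll(), so
 * a negative value blocks indefinitely):
 * ~~~~{.c}
 * // Wait up to one second; returns the number of messages processed
 * int n = nl_cache_mngr_poll(mngr, 1000);
 * ~~~~
 *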
 * @see nl_cache_mngr_data_ready()
 *
 * @return The number of messages processed or a negative error code.
 */
int nl_cache_mngr_poll(struct nl_cache_mngr *mngr, int timeout)
{
        int ret;
        struct pollfd fds = {
                .fd = nl_socket_get_fd(mngr->cm_sock),
                .events = POLLIN,
        };

        NL_DBG(3, "Cache manager %p, poll() fd %d\n", mngr, fds.fd);
        ret = poll(&fds, 1, timeout);
        NL_DBG(3, "Cache manager %p, poll() returned %d\n", mngr, ret);
        if (ret < 0) {
                NL_DBG(4, "nl_cache_mngr_poll(%p): poll() failed with %d (%s)\n",
                       mngr, errno, nl_strerror_l(errno));
                return -nl_syserr2nlerr(errno);
        }

        /* No events, return */
        if (ret == 0)
                return 0;

        return nl_cache_mngr_data_ready(mngr);
}

/**
 * Receive available event notifications
 * @arg mngr            Cache manager
 *
 * This function can be called if the socket associated with the manager
 * contains updates to be received. This function should only be used
 * if nl_cache_mngr_poll() is not used.
 *
 * The function will process messages until there is no more data to
 * be read from the socket.
 *
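 * A sketch of integration with a caller-driven event loop (illustrative
 * only; error handling omitted):
 * ~~~~{.c}
 * struct pollfd fds = {
 *         .fd = nl_cache_mngr_get_fd(mngr),
 *         .events = POLLIN,
 * };
 *
 * if (poll(&fds, 1, -1) > 0 && (fds.revents & POLLIN))
 *         nl_cache_mngr_data_ready(mngr);
 * ~~~~
 *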
 * @see nl_cache_mngr_poll()
 *
 * @return The number of messages processed or a negative error code.
 */
int nl_cache_mngr_data_ready(struct nl_cache_mngr *mngr)
{
        int err, nread = 0;
        struct nl_cb *cb;

        NL_DBG(2, "Cache manager %p, reading new data from fd %d\n",
               mngr, nl_socket_get_fd(mngr->cm_sock));

        cb = nl_cb_clone(mngr->cm_sock->s_cb);
        if (cb == NULL)
                return -NLE_NOMEM;

        nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, event_input, mngr);

        while ((err = nl_recvmsgs_report(mngr->cm_sock, cb)) > 0) {
                NL_DBG(2, "Cache manager %p, recvmsgs read %d messages\n",
                       mngr, err);
                nread += err;
        }

        nl_cb_put(cb);
        if (err < 0 && err != -NLE_AGAIN)
                return err;

        return nread;
}

/**
 * Print information about cache manager
 * @arg mngr            Cache manager
 * @arg p               Dumping parameters
 *
 * Prints information about the cache manager including all managed caches.
 *
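 * A minimal sketch dumping to stderr (illustrative only):
 * ~~~~{.c}
 * struct nl_dump_params params = {
 *         .dp_type = NL_DUMP_LINE,
 *         .dp_fd = stderr,
 * };
 *
 * nl_cache_mngr_info(mngr, &params);
 * ~~~~
 *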
 * @note This is a debugging function.
 */
void nl_cache_mngr_info(struct nl_cache_mngr *mngr, struct nl_dump_params *p)
{
        char buf[128];
        int i;

        nl_dump_line(p, "cache-manager <%p>\n", mngr);
        nl_dump_line(p, "  .protocol = %s\n",
                     nl_nlfamily2str(mngr->cm_protocol, buf, sizeof(buf)));
        nl_dump_line(p, "  .flags = %#x\n", mngr->cm_flags);
        nl_dump_line(p, "  .nassocs = %u\n", mngr->cm_nassocs);
        nl_dump_line(p, "  .sock = <%p>\n", mngr->cm_sock);

        for (i = 0; i < mngr->cm_nassocs; i++) {
                struct nl_cache_assoc *assoc = &mngr->cm_assocs[i];

                if (assoc->ca_cache) {
                        nl_dump_line(p, "    .cache[%d] = <%p> {\n", i, assoc->ca_cache);
                        nl_dump_line(p, "        .name = %s\n", assoc->ca_cache->c_ops->co_name);
                        nl_dump_line(p, "        .change_func = <%p>\n", assoc->ca_change);
                        nl_dump_line(p, "        .change_data = <%p>\n", assoc->ca_change_data);
                        nl_dump_line(p, "        .nitems = %u\n", nl_cache_nitems(assoc->ca_cache));
                        nl_dump_line(p, "        .objects = {\n");

                        p->dp_prefix += 6;
                        nl_cache_dump(assoc->ca_cache, p);
                        p->dp_prefix -= 6;

                        nl_dump_line(p, "        }\n");
                        nl_dump_line(p, "    }\n");
                }
        }
}

/**
 * Free cache manager and all caches.
 * @arg mngr            Cache manager.
 *
 * Release all resources held by a cache manager.
 */
void nl_cache_mngr_free(struct nl_cache_mngr *mngr)
{
        int i;

        if (!mngr)
                return;

        if (mngr->cm_sock)
                nl_close(mngr->cm_sock);

        if (mngr->cm_sync_sock)
                nl_close(mngr->cm_sync_sock);

        if (mngr->cm_flags & NL_ALLOCATED_SOCK)
                nl_socket_free(mngr->cm_sock);

        if (mngr->cm_flags & NL_ALLOCATED_SYNC_SOCK)
                nl_socket_free(mngr->cm_sync_sock);

        for (i = 0; i < mngr->cm_nassocs; i++) {
                if (mngr->cm_assocs[i].ca_cache) {
                        nl_cache_mngt_unprovide(mngr->cm_assocs[i].ca_cache);
                        nl_cache_free(mngr->cm_assocs[i].ca_cache);
                }
        }

        free(mngr->cm_assocs);

        NL_DBG(1, "Cache manager %p freed\n", mngr);

        free(mngr);
}

/** @} */