/* NOTE(review): extraction residue below — looks like the tail of
 * "EXPORT_SYMBOL(flow_cache_lookup);" from before this chunk, wrapped in
 * leftover HTML markup.  Confirm against the full file before building. */
ass="n">flow_cache_lookup);

/*
 * Tasklet body for a per-cpu flush: walk this cpu's hash table, unhash
 * every entry that flow_entry_valid() rejects and hand the batch to the
 * GC machinery.  @data is a struct flow_flush_info * smuggled through
 * the tasklet's unsigned long argument (see flow_cache_flush_per_cpu()).
 */
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	/* Tasklet runs on its own cpu, so this_cpu_ptr() picks the table
	 * that belongs to us; _safe iteration because entries are unhashed
	 * while walking. */
	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle, xfrm))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

	/* Last cpu to finish wakes the waiter in flow_cache_flush(). */
	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, cpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++)
		if (!hlist_empty(&fcp->hash_table[i]))
			return 0;
	return 1;
}

/* Cross-call callback: runs on each cpu picked in flow_cache_flush()
 * and schedules that cpu's flush tasklet with @data (the shared
 * flow_flush_info) as the tasklet argument. */
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

/*
 * Synchronously flush the flow cache for @net: find the online cpus
 * that actually hold entries, flush each of them, and wait until all
 * have completed.  Sleeps (GFP_KERNEL allocation, mutex, completion),
 * so callers must be in process context; non-sleeping contexts use
 * flow_cache_flush_deferred() instead.
 */
void flow_cache_flush(struct net *net)
{
	struct flow_flush_info info;
	cpumask_var_t mask;
	int i, self;

	/* Track which cpus need flushing to avoid disturbing all cores. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&net->xfrm.flow_flush_sem);
	info.cache = &net->xfrm.flow_cache_global;
	for_each_online_cpu(i)
		if (!flow_cache_percpu_empty(info.cache, i))
			cpumask_set_cpu(i, mask);
	atomic_set(&info.cpuleft, cpumask_weight(mask));
	/* Nothing cached anywhere: skip the cross-calls entirely. */
	if (atomic_read(&info.cpuleft) == 0)
		goto done;

	init_completion(&info.completion);
	local_bh_disable();
	/* The local cpu is taken out of the cross-call mask and flushed
	 * by calling the tasklet body directly (bh already disabled). */
	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
	if (self)
		flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);

done:
	mutex_unlock(&net->xfrm.flow_flush_sem);
	put_online_cpus();
	free_cpumask_var(mask);
}

/* Workqueue wrapper so a flush can be requested from atomic context:
 * recover the struct net from the embedded work item and do the
 * (sleeping) flush here in process context. */
static void flow_cache_flush_task(struct work_struct *work)
{
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_flush_work);
	struct net *net = container_of(xfrm, struct net, xfrm);

	flow_cache_flush(net);
}

/* Queue an asynchronous flush of @net's flow cache. */
void flow_cache_flush_deferred(struct net *net)
{
	schedule_work(&net->xfrm.flow_cache_flush_work);
}

/* Allocate and initialise the per-cpu hash table for @cpu.  Idempotent:
 * a cpu whose table already exists is left untouched (hotplug may call
 * this again for a cpu prepared at init time).  Returns 0 or -ENOMEM. */
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		/* Allocate on the cpu's own NUMA node. */
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

/* CPU hotplug notifier: allocate the per-cpu table when a cpu is coming
 * up; when a cpu dies, drop every entry it cached (shrink target 0). */
static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache,
						hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

/*
 * Per-netns initialisation of the flow cache: set up GC/flush state in
 * net->xfrm, allocate the per-cpu tables, register the hotplug
 * notifier, and arm the hash-rekey timer.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): flow_cachep appears to be a file-scope kmem_cache
 * shared by all namespaces; the !flow_cachep check suggests first-call
 * creation — confirm against the part of the file above this chunk.
 */
int flow_cache_init(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	if (!flow_cachep)
		flow_cachep = kmem_cache_create("flow_cache",
						sizeof(struct flow_cache_entry),
						0, SLAB_PANIC, NULL);
	spin_lock_init(&net->xfrm.flow_cache_gc_lock);
	INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
	mutex_init(&net->xfrm.flow_flush_sem);
	atomic_set(&net->xfrm.flow_cache_gc_count, 0);
	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	/* Hold off hotplug while we prepare online cpus and register the
	 * notifier, so no cpu slips through the gap. */
	cpu_notifier_register_begin();

	for (i = 0, ({ (void)0; }); 0; ) ;
	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	__register_hotcpu_notifier(&fc->hotcpu_notifier);

	cpu_notifier_register_done();

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	/* Free every table allocated so far (possible cpus, since some
	 * may have been prepared before the failure). */
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	cpu_notifier_register_done();

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);

/* Tear down the per-netns flow cache: stop the rekey timer, drop the
 * hotplug notifier, then free all per-cpu tables and the percpu area.
 * Mirrors flow_cache_init() in reverse order. */
void flow_cache_fini(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	del_timer_sync(&fc->rnd_timer);
	unregister_hotcpu_notifier(&fc->hotcpu_notifier);

	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);