path: root/drivers/net/ethernet/intel/ixgbe
Age        | Commit message | Author
2022-05-09 | ixgbe: ensure IPsec VF<->PF compatibility | Leon Romanovsky
2022-03-08 | ixgbe: xsk: change !netif_carrier_ok() handling in ixgbe_xmit_zc() | Maciej Fijalkowski
2021-12-22 | ixgbe: set X550 MDIO speed before talking to PHY | Cyril Novikov
2021-07-28 | ixgbe: Fix packet corruption due to missing DMA sync | Markus Boehme
2021-07-28 | ixgbe: Fix an error handling path in 'ixgbe_probe()' | Christophe JAILLET
2021-06-03 | ixgbe: fix large MTU request from VF | Jesse Brandeburg
2021-03-30 | ixgbe: Fix memleak in ixgbe_configure_clsu32 | Dinghao Liu
2021-03-17 | ixgbe: fail to create xfrm offload of IPsec tunnel mode SA | Antony Antony
2020-12-30 | ixgbe: avoid premature Rx buffer reuse | Björn Töpel
2020-09-03 | scsi: fcoe: Fix I/O path allocation | Mike Christie
2020-07-16 | ixgbe: protect ring accesses with READ- and WRITE_ONCE | Ciara Loftus
2020-06-22 | ixgbe: fix signed-integer-overflow warning | Xie XiuQi
2020-06-22 | ixgbe: Fix XDP redirect on archs with PAGE_SIZE above 4K | Jesper Dangaard Brouer
2020-02-05 | ixgbe: Fix calculation of queue with VFs and flow director on interface flap | Cambda Zhu
2020-01-12 | net/ixgbe: Fix concurrency issues between config flow and XSK | Maxim Mikityanskiy
2019-12-31 | ixgbe: protect TX timestamping from API misuse | Manjunath Patil
2019-11-08 | ixgbe: need_wakeup flag might not be set for Tx | Magnus Karlsson
2019-11-01 | ixgbe: Remove duplicate clear_bit() call | Igor Pylypiv
2019-09-16 | Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next | David S. Miller
2019-09-16 | ixgbe: fix xdp handle calculations | Ciara Loftus
2019-09-15 | Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net | David S. Miller
2019-09-12 | ixgbe: Fix secpath usage for IPsec TX offload. | Steffen Klassert
2019-09-11 | ixgbe: fix double clean of Tx descriptors with xdp | Ilya Maximets
2019-09-11 | ixgbe: Prevent u8 wrapping of ITR value to something less than 10us | Alexander Duyck
2019-09-11 | ixgbe: use skb_get_queue_mapping in tx path | Tonghao Zhang
2019-09-11 | ixgbe: fix memory leaks | Wenwen Wang
2019-09-09 | ixgbe: sync the first fragment unconditionally | Firo Yang
2019-09-06 | Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next | David S. Miller
2019-09-05 | ixgbe: fix xdp handle calculations | Kevin Laatz
2019-09-05 | ixgbe: Use kzfree() rather than its implementation. | zhong jiang
2019-08-31 | ixgbe: modify driver for handling offsets | Kevin Laatz
2019-08-31 | ixgbe: simplify Rx buffer recycle | Kevin Laatz
2019-08-19 | Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net | David S. Miller
2019-08-17 | ixgbe: add support for AF_XDP need_wakeup feature | Magnus Karlsson
2019-08-17 | xsk: replace ndo_xsk_async_xmit with ndo_xsk_wakeup | Magnus Karlsson
2019-08-10 | ixgbe: no need to check return value of debugfs_create functions | Greg Kroah-Hartman
2019-08-09 | ixgbe: fix possible deadlock in ixgbe_service_task() | Taehee Yoo
2019-07-30 | net: Use skb_frag_off accessors | Jonathan Lemon
2019-07-22 | net: Use skb accessors in network drivers | Matthew Wilcox (Oracle)
2019-07-09 | drivers: net: use flow block API | Pablo Neira Ayuso
2019-07-09 | net: flow_offload: add flow_block_cb_setup_simple() | Pablo Neira Ayuso
2019-07-04 | Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next | David S. Miller
2019-06-28 | ixgbe: fix potential u32 overflow on shift | Colin Ian King
2019-06-28 | ixgbe: Avoid NULL pointer dereference with VF on non-IPsec hw | Dann Frazier
2019-06-27 | xsk: Return the whole xdp_desc from xsk_umem_consume_tx | Maxim Mikityanskiy
2019-06-26 | ixgbe: Check DDM existence in transceiver before access | Mauro S. M. Rodrigues
2019-06-05 | ixgbe: implement support for SDP/PPS output on X550 hardware | Jacob Keller
2019-06-05 | ixgbe: Use LLDP ethertype define ETH_P_LLDP | Anirudh Venkataramanan
2019-06-05 | ixgbe: add a kernel documentation comment for ixgbe_ptp_get_ts_config | Jacob Keller
2019-06-05 | ixgbe: use 'cc' instead of 'hw_cc' for local variable | Jacob Keller
kernel/events/callchain.c (excerpt):

/* Take a reference on the callchain buffers; allocate them for the first user. */
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If requesting per event more than the global cap,
	 * return a different error to help userspace figure
	 * this out.
	 *
	 * And also do it here so that we have &callchain_mutex held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

/* Drop a reference; free the buffers once the last callchain event is gone. */
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

/* Reserve this CPU's entry slot for the current recursion context. */
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

/* Walk the kernel and/or user stack for @regs into a per-CPU entry. */
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry		= entry;
	ctx.max_stack		= max_stack;
	ctx.nr			= entry->nr = init_nr;
	ctx.contexts		= 0;
	ctx.contexts_maxed	= false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			mm_segment_t fs;

			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			fs = get_fs();
			set_fs(USER_DS);
			perf_callchain_user(&ctx, regs);
			set_fs(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Reject changes while buffers sized for the old limit are in use. */
	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}
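The -EOVERFLOW branch in get_callchain_buffers() is what userspace sees when a single event requests a deeper callchain than the global kernel.perf_event_max_stack limit (the sysctl backed by perf_event_max_stack_handler() above; the default is 127, and perf_event_open(2) documents the EOVERFLOW return for this case). Below is a minimal userspace sketch of that behavior, not part of the original page: it opens a self-monitoring software event whose perf_event_attr.sample_max_stack exceeds the default cap and expects the open to fail with EOVERFLOW. The event type, sample period, and the value 1024 are arbitrary illustration choices.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper; glibc provides no perf_event_open() stub. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_CALLCHAIN;
	attr.exclude_kernel = 1;	/* stay usable without extra privileges */
	attr.exclude_hv = 1;
	/*
	 * Ask for more frames than the default kernel.perf_event_max_stack
	 * (127): get_callchain_buffers() should fail the open with EOVERFLOW.
	 */
	attr.sample_max_stack = 1024;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* monitor self, any CPU */
	if (fd < 0)
		printf("perf_event_open failed: %s\n", strerror(errno));
	else {
		printf("opened fd %d (the cap must have been raised)\n", fd);
		close(fd);
	}
	return 0;
}

Raising the cap first (e.g. sysctl kernel.perf_event_max_stack=1024) lets the same open succeed, but only while no callchain-using events exist: as the handler above shows, the write returns -EBUSY when nr_callchain_events is non-zero, because the live per-CPU buffers were sized for the old limit.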