| #ifndef ECORE_ERASE |
| #ifdef __LINUX |
| |
| #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| |
| #include <linux/kernel.h> |
| #include <linux/types.h> |
| #include <asm/byteorder.h> |
| #include <linux/version.h> |
| #include <linux/module.h> |
| #include <linux/crc32.h> |
| #include <linux/etherdevice.h> |
| |
| #define ECORE_ALIGN(x, a) ALIGN(x, a) |
| #endif |
| |
| /* Always define ECORE_OOO for VBD */ |
| #define ECORE_OOO |
| |
| #include "bcmtype.h" |
| #include "utils.h" |
| #include "lm5710.h" |
| #include "ecore_sp_verbs.h" |
| #include "command.h" |
| #include "debug.h" |
| #include "ecore_common.h" |
| |
| /************************ Debug print macros **********************************/ |
| #if !defined(UEFI) && defined(DBG) |
| #define ECORE_MSG(pdev, m, ...) \ |
| DbgMessage(pdev, WARNi, m, ##__VA_ARGS__) |
| #else |
| #define ECORE_MSG |
| #endif |
| |
| /************************ Error prints ****************************************/ |
| #if !defined(UEFI) && defined(DBG) |
| #define ECORE_ERR(str, ...) DbgMessage(pdev, FATAL, str, ##__VA_ARGS__) |
| #else |
| #define ECORE_ERR |
| #endif |
| |
| |
| /*********************** ECORE WRAPPER MACROS ********************************/ |
| |
| #define ECORE_RET_PENDING(pending_bit, pending) \ |
| (ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS) |
| |
| #define ECORE_ZALLOC(_size, _flags, _pdev) mm_rt_zalloc_mem(_pdev, _size) |
#define ECORE_CALLOC(_len, _size, _flags, _pdev) mm_rt_zalloc_mem(_pdev, (_len) * (_size))
| #define ECORE_FREE(_pdev, _buf, _size) mm_rt_free_mem(_pdev, _buf, _size, 0) |
| |
| /* |
| * Ecore implementation of set/get flag |
| * (differs from VBD set_flags, get_flags) |
| */ |
| #define ECORE_SET_FLAG(value, mask, flag) \ |
| do {\ |
| (value) &= ~(mask);\ |
| (value) |= ((flag) << (mask##_SHIFT));\ |
| } while (0) |
| |
#define ECORE_GET_FLAG(value, mask) \
	(((value) & (mask)) >> (mask##_SHIFT))
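
/*
 * Usage sketch (illustrative; FOO_MASK is a hypothetical mask name - any
 * mask used here must have a matching FOO_MASK_SHIFT constant defined):
 *
 *	u16 flags = 0;
 *	ECORE_SET_FLAG(flags, FOO_MASK, 1);
 *	val = ECORE_GET_FLAG(flags, FOO_MASK);
 *
 * ECORE_SET_FLAG clears the masked field and then sets it to 'flag';
 * ECORE_GET_FLAG extracts the field shifted down to bit 0.
 */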
| |
#define ecore_sp_post(_pdev, _cmd, _cid, _data, _con_type) \
| lm_sq_post(_pdev, _cid, (u8)(_cmd), CMD_PRIORITY_NORMAL, _con_type, \ |
| _data) |
| |
| #define ECORE_SET_CTX_VALIDATION(_pdev, _cxt, _cid) \ |
| lm_set_cdu_validation_data(_pdev, _cid, FALSE) /* context? type? */ |
| /************************ TODO for LM people!!! *******************************/ |
| #define ECORE_TODO_UPDATE_COALESCE_SB_INDEX(a1, a2, a3, a4, a5) |
| #define ECORE_TODO_LINK_REPORT(pdev) |
| #define ECORE_TODO_FW_COMMAND(_pdev, _drv_msg_code, _val) (-1) |
| |
| /************************ Lists ***********************************************/ |
| #define ECORE_LIST_FOR_EACH_ENTRY(pos, _head, _link, cast) \ |
| for (pos = (cast *)d_list_peek_head(_head); \ |
| pos; \ |
| pos = (cast *)d_list_next_entry(&pos->_link)) |
| |
| /** |
| * ECORE_LIST_FOR_EACH_ENTRY_SAFE - iterate over list of given type |
| * @pos: the type * to use as a loop cursor. |
| * @n: another type * to use as temporary storage |
| * @head: the head for your list. |
| * @member: the name of the list_struct within the struct. |
| * |
| * iterate over list of given type safe against removal of list entry |
| */ |
| #define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, member, cast) \ |
| for (pos = (cast *)d_list_peek_head(head), \ |
| n = (pos) ? (cast *)d_list_next_entry(&pos->member) : NULL; \ |
| pos != NULL; \ |
| pos = (cast *)n, \ |
| n = (pos) ? (cast *)d_list_next_entry(&pos->member) : NULL) |
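
/*
 * Example (illustrative): removing and freeing every element of a d_list
 * while iterating, assuming elements embed a d_list link named 'link':
 *
 *	struct ecore_exeq_elem *pos, *n;
 *	ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, &head, link,
 *				       struct ecore_exeq_elem) {
 *		ECORE_LIST_REMOVE_ENTRY(&pos->link, &head);
 *		ECORE_FREE(pdev, pos, sizeof(*pos));
 *	}
 */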
| |
#define ECORE_LIST_IS_LAST(_link, _list) ((_link) == (_list)->tail)
| |
| #define ECORE_LIST_IS_EMPTY(head) \ |
| d_list_is_empty(head) |
| |
| #define ECORE_LIST_FIRST_ENTRY(head, cast, link) \ |
| (cast *)d_list_peek_head(head) |
| |
| #define ECORE_LIST_NEXT(pos, link, cast) \ |
| (cast *)d_list_next_entry(&((pos)->link)) |
| |
| #define ECORE_LIST_INIT(head) \ |
| do { \ |
| d_list_clear(head); \ |
| } while (0) |
| |
| #define ECORE_LIST_PUSH_TAIL(link, head) \ |
| do { \ |
| d_list_push_tail(head, link); \ |
| } while (0) |
| |
| #define ECORE_LIST_PUSH_HEAD(link, head) \ |
| do { \ |
| d_list_push_head(head, link); \ |
| } while (0) |
| |
| #define ECORE_LIST_REMOVE_ENTRY(link, head) \ |
| do { \ |
| d_list_remove_entry(head, link); \ |
| } while (0) |
| |
| #define ECORE_LIST_SPLICE_INIT(new_head, head) \ |
| do { \ |
| d_list_add_head(head, new_head); \ |
| d_list_clear(new_head); \ |
| } while (0) |
| |
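/* Compute a little-endian CRC32 over a 6-byte MAC address: the MAC is packed
 * into bytes 2..7 of a zero-initialized 8-byte buffer, CRC'ed and then
 * byte-swapped.
 */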
| static __inline u32_t ecore_crc32_le(u32_t seed, u8_t *mac, u32_t len) |
| { |
| u32_t packet_buf[2] = {0}; |
| |
| memcpy(((u8_t *)(&packet_buf[0]))+2, &mac[0], 2); |
| memcpy(&packet_buf[1], &mac[2], 4); |
| return SWAP_BYTES32(calc_crc32((u8_t *)packet_buf, 8, seed, 0)); |
| } |
| |
| /************************ Per compilation target ******************************/ |
| #ifdef __LINUX |
| |
| #define ECORE_UNLIKELY unlikely |
| #define ECORE_LIKELY likely |
| |
| #define ecore_atomic_read mm_atomic_read |
| #define ecore_atomic_cmpxchg mm_atomic_cmpxchg |
| #define ecore_atomic_set(a, v) mm_atomic_set((u32_t *)(a), v) |
| #define smp_mb__before_atomic() mm_barrier() |
| #define smp_mb__after_atomic() mm_barrier() |
| |
| /* Other */ |
| #define ECORE_IS_VALID_ETHER_ADDR(_mac) is_valid_ether_addr(_mac) |
| #define ECORE_SET_WAIT_COUNT(_cnt) |
| #define ECORE_SET_WAIT_DELAY_US(_cnt, _delay_us) |
| |
| /* Mutex related */ |
| #define ECORE_MUTEX_INIT(_mutex) mutex_init(_mutex) |
| #define ECORE_MUTEX_LOCK(_mutex) mutex_lock(_mutex) |
| #define ECORE_MUTEX_UNLOCK(_mutex) mutex_unlock(_mutex) |
| |
| #define ECORE_MIGHT_SLEEP() ediag_might_sleep() |
| #define ECORE_TEST_BIT(bit, var) test_bit(bit, var) |
| #define ECORE_TEST_AND_CLEAR_BIT(bit, var) test_and_clear_bit(bit, var) |
| |
| #else /* ! LINUX */ |
| |
| typedef u16 __le16; |
| |
| #define ecore_atomic_read mm_atomic_read |
| #define ecore_atomic_cmpxchg mm_atomic_cmpxchg |
| #define ecore_atomic_set(a, val) mm_atomic_set((u32_t *)(a), val) |
| |
| #define ECORE_UNLIKELY(x) (x) |
| #define ECORE_LIKELY(x) (x) |
| #define BUG() DbgBreakMsg("Bug") |
| #define smp_mb() mm_barrier() |
| #define smp_mb__before_atomic() mm_barrier() |
| #define smp_mb__after_atomic() mm_barrier() |
| #define mb() mm_barrier() |
| #define wmb() mm_barrier() |
| #define mmiowb() mm_barrier() |
| |
| #define ECORE_MIGHT_SLEEP() /* IRQL_PASSIVE_CODE() */ |
| |
| /* Mutex related */ |
| #define ECORE_MUTEX_INIT(_mutex) |
| #define ECORE_MUTEX_LOCK(_mutex) |
| #define ECORE_MUTEX_UNLOCK(_mutex) |
| |
| /* Atomic Bit Manipulation */ |
| #define ECORE_TEST_BIT(_bit, _var) \ |
| (mm_atomic_long_read(_var) & (1 << (_bit))) |
| |
| /* Other */ |
| #define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE |
| #define ECORE_SET_WAIT_DELAY_US(_cnt, _delay_us) \ |
| do { \ |
| _delay_us = (_cnt >= 2360) ? 100 : 25000; \ |
| } while (0) |
| |
| /* |
| * In VBD We'll wait 10,000 times 100us (1 second) + |
| * 2360 times 25000us (59sec) = total 60 sec |
| * (Winodws only note) the 25000 wait will cause |
| * wait to be without CPU stall (look in win_util.c) |
| */ |
| #define ECORE_SET_WAIT_COUNT(_cnt) \ |
| do { \ |
| _cnt = 10000 + 2360; \ |
| } while (0) |
| |
| static __inline BOOL ECORE_TEST_AND_CLEAR_BIT(int bit, unsigned long *vec) |
| { |
| BOOL set = ECORE_TEST_BIT(bit, vec); |
| ECORE_CLEAR_BIT(bit, vec); |
| |
| return set; |
| } |
| |
| #endif /* END if "per LM target type" */ |
| |
| /* Spin lock related */ |
| #define ECORE_SPIN_LOCK_INIT(_spin, _pdev) mm_init_lock(_pdev, _spin) |
| #define ECORE_SPIN_LOCK_BH(_spin) mm_acquire_lock(_spin) |
| #define ECORE_SPIN_UNLOCK_BH(_spin) mm_release_lock(_spin) |
| |
| #endif /* not ECORE_ERASE */ |
| #if defined(__FreeBSD__) && !defined(NOT_LINUX) |
| #include "bxe.h" |
| #include "ecore_init.h" |
| #elif !defined(EDIAG) |
| #ifdef ECORE_ERASE |
| #include <linux/version.h> |
| #include <linux/module.h> |
| #include <linux/crc32.h> |
| #include <linux/netdevice.h> |
| #include <linux/etherdevice.h> |
| #if (LINUX_VERSION_CODE >= 0x02061b) && !defined(BNX2X_DRIVER_DISK) && !defined(__VMKLNX__) /* BNX2X_UPSTREAM */ |
| #include <linux/crc32c.h> |
| #endif |
| #include "bnx2x.h" |
| #include "bnx2x_cmn.h" |
| #include "bnx2x_sp.h" |
| |
| #define ECORE_MAX_EMUL_MULTI 16 |
| #endif |
| #endif |
| |
| /**** Exe Queue interfaces ****/ |
| |
| /** |
| * ecore_exe_queue_init - init the Exe Queue object |
| * |
| * @o: pointer to the object |
| * @exe_len: length |
| * @owner: pointer to the owner |
| * @validate: validate function pointer |
| * @optimize: optimize function pointer |
| * @exec: execute function pointer |
| * @get: get function pointer |
| */ |
| static INLINE void ecore_exe_queue_init(struct _lm_device_t *pdev, |
| struct ecore_exe_queue_obj *o, |
| int exe_len, |
| union ecore_qable_obj *owner, |
| exe_q_validate validate, |
| exe_q_remove remove, |
| exe_q_optimize optimize, |
| exe_q_execute exec, |
| exe_q_get get) |
| { |
| mm_memset(o, 0, sizeof(*o)); |
| |
| ECORE_LIST_INIT(&o->exe_queue); |
| ECORE_LIST_INIT(&o->pending_comp); |
| |
| ECORE_SPIN_LOCK_INIT(&o->lock, pdev); |
| |
| o->exe_chunk_len = exe_len; |
| o->owner = owner; |
| |
| /* Owner specific callbacks */ |
| o->validate = validate; |
| o->remove = remove; |
| o->optimize = optimize; |
| o->execute = exec; |
| o->get = get; |
| |
| ECORE_MSG(pdev, "Setup the execution queue with the chunk length of %d\n", |
| exe_len); |
| } |
| |
| static INLINE void ecore_exe_queue_free_elem(struct _lm_device_t *pdev, |
| struct ecore_exeq_elem *elem) |
| { |
| ECORE_MSG(pdev, "Deleting an exe_queue element\n"); |
| ECORE_FREE(pdev, elem, sizeof(*elem)); |
| } |
| |
| static INLINE int ecore_exe_queue_length(struct ecore_exe_queue_obj *o) |
| { |
| struct ecore_exeq_elem *elem; |
| int cnt = 0; |
| |
| #ifdef ECORE_ERASE |
| spin_lock_bh(&o->lock); |
| #endif |
| |
| ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link, |
| struct ecore_exeq_elem) |
| cnt++; |
| |
| #ifdef ECORE_ERASE |
| spin_unlock_bh(&o->lock); |
| #endif |
| |
| return cnt; |
| } |
| |
| /** |
| * ecore_exe_queue_add - add a new element to the execution queue |
| * |
| * @pdev: driver handle |
| * @o: queue |
| * @cmd: new command to add |
| * @restore: true - do not optimize the command |
| * |
| * If the element is optimized or is illegal, frees it. |
| */ |
| static INLINE int ecore_exe_queue_add(struct _lm_device_t *pdev, |
| struct ecore_exe_queue_obj *o, |
| struct ecore_exeq_elem *elem, |
| BOOL restore) |
| { |
| int rc; |
| |
| ECORE_SPIN_LOCK_BH(&o->lock); |
| |
| if (!restore) { |
		/* Try to cancel this element (optimize it away) */
| rc = o->optimize(pdev, o->owner, elem); |
| if (rc) |
| goto free_and_exit; |
| |
| /* Check if this request is ok */ |
| rc = o->validate(pdev, o->owner, elem); |
| if (rc) { |
| ECORE_MSG(pdev, "Preamble failed: %d\n", rc); |
| goto free_and_exit; |
| } |
| } |
| |
| /* If so, add it to the execution queue */ |
| ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue); |
| |
| ECORE_SPIN_UNLOCK_BH(&o->lock); |
| |
| return ECORE_SUCCESS; |
| |
| free_and_exit: |
| ecore_exe_queue_free_elem(pdev, elem); |
| |
| ECORE_SPIN_UNLOCK_BH(&o->lock); |
| |
| return rc; |
| } |
| |
| static INLINE void __ecore_exe_queue_reset_pending( |
| struct _lm_device_t *pdev, |
| struct ecore_exe_queue_obj *o) |
| { |
| struct ecore_exeq_elem *elem; |
| |
| while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { |
| elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp, |
| struct ecore_exeq_elem, |
| link); |
| |
| ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp); |
| ecore_exe_queue_free_elem(pdev, elem); |
| } |
| } |
| |
| /** |
| * ecore_exe_queue_step - execute one execution chunk atomically |
| * |
| * @pdev: driver handle |
| * @o: queue |
| * @ramrod_flags: flags |
| * |
| * (Should be called while holding the exe_queue->lock). |
| */ |
| static INLINE int ecore_exe_queue_step(struct _lm_device_t *pdev, |
| struct ecore_exe_queue_obj *o, |
| unsigned long *ramrod_flags) |
| { |
| struct ecore_exeq_elem *elem, spacer; |
| int cur_len = 0, rc; |
| |
| mm_memset(&spacer, 0, sizeof(spacer)); |
| |
| /* Next step should not be performed until the current is finished, |
| * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to |
| * properly clear object internals without sending any command to the FW |
| * which also implies there won't be any completion to clear the |
| * 'pending' list. |
| */ |
| if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { |
| if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { |
| ECORE_MSG(pdev, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); |
| __ecore_exe_queue_reset_pending(pdev, o); |
| } else { |
| return ECORE_PENDING; |
| } |
| } |
| |
| /* Run through the pending commands list and create a next |
| * execution chunk. |
| */ |
| while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) { |
| elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue, |
| struct ecore_exeq_elem, |
| link); |
| DbgBreakIf(!elem->cmd_len); |
| |
| if (cur_len + elem->cmd_len <= o->exe_chunk_len) { |
| cur_len += elem->cmd_len; |
| /* Prevent from both lists being empty when moving an |
| * element. This will allow the call of |
| * ecore_exe_queue_empty() without locking. |
| */ |
| ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp); |
| mb(); |
| ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue); |
| ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp); |
| ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp); |
| } else |
| break; |
| } |
| |
| /* Sanity check */ |
| if (!cur_len) |
| return ECORE_SUCCESS; |
| |
| rc = o->execute(pdev, o->owner, &o->pending_comp, ramrod_flags); |
| if (rc < 0) |
| /* In case of an error return the commands back to the queue |
| * and reset the pending_comp. |
| */ |
| ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue); |
| else if (!rc) |
| /* If zero is returned, means there are no outstanding pending |
| * completions and we may dismiss the pending list. |
| */ |
| __ecore_exe_queue_reset_pending(pdev, o); |
| |
| return rc; |
| } |
| |
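/* Lockless emptiness check: ecore_exe_queue_step() inserts a spacer so that
 * an element is never absent from both lists at once, which is what makes
 * this check safe without taking the queue lock (see the comment there).
 */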
| static INLINE BOOL ecore_exe_queue_empty(struct ecore_exe_queue_obj *o) |
| { |
| BOOL empty = ECORE_LIST_IS_EMPTY(&o->exe_queue); |
| |
| /* Don't reorder!!! */ |
| mb(); |
| |
| return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp); |
| } |
| |
| static INLINE struct ecore_exeq_elem *ecore_exe_queue_alloc_elem( |
| struct _lm_device_t *pdev) |
| { |
| ECORE_MSG(pdev, "Allocating a new exe_queue element\n"); |
| return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, |
| pdev); |
| } |
| |
| /************************ raw_obj functions ***********************************/ |
| static BOOL ecore_raw_check_pending(struct ecore_raw_obj *o) |
| { |
| /* |
| * !! converts the value returned by ECORE_TEST_BIT such that it |
| * is guaranteed not to be truncated regardless of BOOL definition. |
| * |
| * Note we cannot simply define the function's return value type |
| * to match the type returned by ECORE_TEST_BIT, as it varies by |
| * platform/implementation. |
| */ |
| |
| return !!ECORE_TEST_BIT(o->state, o->pstate); |
| } |
| |
| static void ecore_raw_clear_pending(struct ecore_raw_obj *o) |
| { |
| smp_mb__before_atomic(); |
| ECORE_CLEAR_BIT(o->state, o->pstate); |
| smp_mb__after_atomic(); |
| } |
| |
| static void ecore_raw_set_pending(struct ecore_raw_obj *o) |
| { |
| smp_mb__before_atomic(); |
| ECORE_SET_BIT(o->state, o->pstate); |
| smp_mb__after_atomic(); |
| } |
| |
| /** |
| * ecore_state_wait - wait until the given bit(state) is cleared |
| * |
| * @pdev: device handle |
| * @state: state which is to be cleared |
| * @state_p: state buffer |
| * |
| */ |
| static INLINE int ecore_state_wait(struct _lm_device_t *pdev, int state, |
| unsigned long *pstate) |
| { |
| /* can take a while if any port is running */ |
| int cnt = 5000; |
| |
| #ifndef ECORE_ERASE |
| int delay_us = 1000; |
| |
	/* In VBD we'll wait 10,000 times 100us (1 second) +
	 * 2360 times 25000us (59 sec) = total 60 sec.
	 * (Windows-only note) the 25000us wait avoids a CPU
	 * stall during the wait (see win_util.c).
	 */
| cnt = 10000 + 2360; |
| #endif |
| |
| if (CHIP_REV_IS_EMUL(pdev)) |
| cnt *= 20; |
| |
| ECORE_MSG(pdev, "waiting for state to become %d\n", state); |
| |
| ECORE_MIGHT_SLEEP(); |
| while (cnt--) { |
| if (!ECORE_TEST_BIT(state, pstate)) { |
| #ifdef ECORE_STOP_ON_ERROR |
| ECORE_MSG(pdev, "exit (cnt %d)\n", 5000 - cnt); |
| #endif |
| return ECORE_SUCCESS; |
| } |
| |
| #ifndef ECORE_ERASE |
| /* in case reset is in progress we won't get completion */ |
| if (lm_reset_is_inprogress(pdev)) |
| return 0; |
| |
| delay_us = (cnt >= 2360) ? 100 : 25000; |
| #endif |
| mm_wait(pdev, delay_us); |
| |
| if (pdev->panic) |
| return ECORE_IO; |
| } |
| |
| /* timeout! */ |
| ECORE_ERR("timeout waiting for state %d\n", state); |
| #ifdef ECORE_STOP_ON_ERROR |
| ecore_panic(); |
| #endif |
| |
| return ECORE_TIMEOUT; |
| } |
| |
| static int ecore_raw_wait(struct _lm_device_t *pdev, struct ecore_raw_obj *raw) |
| { |
| return ecore_state_wait(pdev, raw->state, raw->pstate); |
| } |
| |
| /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ |
| /* credit handling callbacks */ |
| static BOOL ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset) |
| { |
| struct ecore_credit_pool_obj *mp = o->macs_pool; |
| |
| DbgBreakIf(!mp); |
| |
| return mp->get_entry(mp, offset); |
| } |
| |
| static BOOL ecore_get_credit_mac(struct ecore_vlan_mac_obj *o) |
| { |
| struct ecore_credit_pool_obj *mp = o->macs_pool; |
| |
| DbgBreakIf(!mp); |
| |
| return mp->get(mp, 1); |
| } |
| |
| static BOOL ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset) |
| { |
| struct ecore_credit_pool_obj *vp = o->vlans_pool; |
| |
| DbgBreakIf(!vp); |
| |
| return vp->get_entry(vp, offset); |
| } |
| |
| static BOOL ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o) |
| { |
| struct ecore_credit_pool_obj *vp = o->vlans_pool; |
| |
| DbgBreakIf(!vp); |
| |
| return vp->get(vp, 1); |
| } |
| |
| static BOOL ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o) |
| { |
| struct ecore_credit_pool_obj *mp = o->macs_pool; |
| struct ecore_credit_pool_obj *vp = o->vlans_pool; |
| |
| if (!mp->get(mp, 1)) |
| return FALSE; |
| |
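	/* Take the VLAN credit too; on failure, roll back the MAC credit */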
| if (!vp->get(vp, 1)) { |
| mp->put(mp, 1); |
| return FALSE; |
| } |
| |
| return TRUE; |
| } |
| |
| static BOOL ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset) |
| { |
| struct ecore_credit_pool_obj *mp = o->macs_pool; |
| |
| return mp->put_entry(mp, offset); |
| } |
| |
| static BOOL ecore_put_credit_mac(struct ecore_vlan_mac_obj *o) |
| { |
| struct ecore_credit_pool_obj *mp = o->macs_pool; |
| |
| return mp->put(mp, 1); |
| } |
| |
| static BOOL ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset) |
| { |
| struct ecore_credit_pool_obj *vp = o->vlans_pool; |
| |
| return vp->put_entry(vp, offset); |
| } |
| |
| static BOOL ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o) |
| { |
| struct ecore_credit_pool_obj *vp = o->vlans_pool; |
| |
| return vp->put(vp, 1); |
| } |
| |
| static BOOL ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o) |
| { |
| struct ecore_credit_pool_obj *mp = o->macs_pool; |
| struct ecore_credit_pool_obj *vp = o->vlans_pool; |
| |
| if (!mp->put(mp, 1)) |
| return FALSE; |
| |
| if (!vp->put(vp, 1)) { |
| mp->get(mp, 1); |
| return FALSE; |
| } |
| |
| return TRUE; |
| } |
| |
| /** |
| * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac |
| * head list. |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * |
| * @details: Non-blocking implementation; should be called under execution |
| * queue lock. |
| */ |
| static int __ecore_vlan_mac_h_write_trylock(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| if (o->head_reader) { |
| ECORE_MSG(pdev, "vlan_mac_lock writer - There are readers; Busy\n"); |
| return ECORE_BUSY; |
| } |
| |
| ECORE_MSG(pdev, "vlan_mac_lock writer - Taken\n"); |
| return ECORE_SUCCESS; |
| } |
| |
| /** |
| * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step |
| * which wasn't able to run due to a taken lock on vlan mac head list. |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * |
| * @details Should be called under execution queue lock; notice it might release |
| * and reclaim it during its run. |
| */ |
| static void __ecore_vlan_mac_h_exec_pending(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| int rc; |
| unsigned long ramrod_flags = o->saved_ramrod_flags; |
| |
| ECORE_MSG(pdev, "vlan_mac_lock execute pending command with ramrod flags %lu\n", |
| ramrod_flags); |
| o->head_exe_request = FALSE; |
| o->saved_ramrod_flags = 0; |
| rc = ecore_exe_queue_step(pdev, &o->exe_queue, &ramrod_flags); |
| if (rc != ECORE_SUCCESS) { |
| ECORE_ERR("execution of pending commands failed with rc %d\n", |
| rc); |
| #ifdef ECORE_STOP_ON_ERROR |
| ecore_panic(); |
| #endif |
| } |
| } |
| |
| /** |
| * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been |
| * called due to vlan mac head list lock being taken. |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * @ramrod_flags: ramrod flags of missed execution |
| * |
| * @details Should be called under execution queue lock. |
| */ |
| static void __ecore_vlan_mac_h_pend(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| unsigned long ramrod_flags) |
| { |
| o->head_exe_request = TRUE; |
| o->saved_ramrod_flags = ramrod_flags; |
| ECORE_MSG(pdev, "Placing pending execution with ramrod flags %lu\n", |
| ramrod_flags); |
| } |
| |
| /** |
| * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * |
| * @details Should be called under execution queue lock. Notice if a pending |
| * execution exists, it would perform it - possibly releasing and |
| * reclaiming the execution queue lock. |
| */ |
| static void __ecore_vlan_mac_h_write_unlock(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| /* It's possible a new pending execution was added since this writer |
| * executed. If so, execute again. [Ad infinitum] |
| */ |
	while (o->head_exe_request) {
| ECORE_MSG(pdev, "vlan_mac_lock - writer release encountered a pending request\n"); |
| __ecore_vlan_mac_h_exec_pending(pdev, o); |
| } |
| } |
| |
| /** |
| * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * |
| * @details Notice if a pending execution exists, it would perform it - |
| * possibly releasing and reclaiming the execution queue lock. |
| */ |
| void ecore_vlan_mac_h_write_unlock(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); |
| __ecore_vlan_mac_h_write_unlock(pdev, o); |
| ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); |
| } |
| |
| /** |
| * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * |
| * @details Should be called under the execution queue lock. May sleep. May |
| * release and reclaim execution queue lock during its run. |
| */ |
| static int __ecore_vlan_mac_h_read_lock(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| /* If we got here, we're holding lock --> no WRITER exists */ |
| o->head_reader++; |
| ECORE_MSG(pdev, "vlan_mac_lock - locked reader - number %d\n", |
| o->head_reader); |
| |
| return ECORE_SUCCESS; |
| } |
| |
| /** |
| * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * |
| * @details May sleep. Claims and releases execution queue lock during its run. |
| */ |
| int ecore_vlan_mac_h_read_lock(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| int rc; |
| |
| ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); |
| rc = __ecore_vlan_mac_h_read_lock(pdev, o); |
| ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); |
| |
| return rc; |
| } |
| |
| /** |
| * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * |
| * @details Should be called under execution queue lock. Notice if a pending |
| * execution exists, it would be performed if this was the last |
| * reader. possibly releasing and reclaiming the execution queue lock. |
| */ |
| static void __ecore_vlan_mac_h_read_unlock(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| if (!o->head_reader) { |
| ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n"); |
| #ifdef ECORE_STOP_ON_ERROR |
| ecore_panic(); |
| #endif |
| } else { |
| o->head_reader--; |
| ECORE_MSG(pdev, "vlan_mac_lock - decreased readers to %d\n", |
| o->head_reader); |
| } |
| |
| /* It's possible a new pending execution was added, and that this reader |
| * was last - if so we need to execute the command. |
| */ |
| if (!o->head_reader && o->head_exe_request) { |
| ECORE_MSG(pdev, "vlan_mac_lock - reader release encountered a pending request\n"); |
| |
| /* Writer release will do the trick */ |
| __ecore_vlan_mac_h_write_unlock(pdev, o); |
| } |
| } |
| |
| /** |
| * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * |
| * @details Notice if a pending execution exists, it would be performed if this |
| * was the last reader. Claims and releases the execution queue lock |
| * during its run. |
| */ |
| void ecore_vlan_mac_h_read_unlock(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); |
| __ecore_vlan_mac_h_read_unlock(pdev, o); |
| ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); |
| } |
| |
| /** |
| * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object |
| * @n: number of elements to get |
| * @base: base address for element placement |
| * @stride: stride between elements (in bytes) |
| */ |
| static int ecore_get_n_elements(struct _lm_device_t *pdev, struct ecore_vlan_mac_obj *o, |
| int n, u8 *base, u8 stride, u8 size) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| u8 *next = base; |
| int counter = 0; |
| int read_lock; |
| |
| ECORE_MSG(pdev, "get_n_elements - taking vlan_mac_lock (reader)\n"); |
| read_lock = ecore_vlan_mac_h_read_lock(pdev, o); |
| if (read_lock != ECORE_SUCCESS) |
| ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n"); |
| |
| /* traverse list */ |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, |
| struct ecore_vlan_mac_registry_elem) { |
| if (counter < n) { |
| mm_memcpy(next, &pos->u, size); |
| counter++; |
			ECORE_MSG(pdev, "copied element number %d to address %p\n",
				  counter, next);
| next += stride + size; |
| } |
| } |
| |
| if (read_lock == ECORE_SUCCESS) { |
| ECORE_MSG(pdev, "get_n_elements - releasing vlan_mac_lock (reader)\n"); |
| ecore_vlan_mac_h_read_unlock(pdev, o); |
| } |
| |
| return counter * ETH_ALEN; |
| } |
| |
| /* check_add() callbacks */ |
| static int ecore_check_mac_add(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| union ecore_classification_ramrod_data *data) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| |
	ECORE_MSG(pdev, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
| |
| if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac)) |
| return ECORE_INVAL; |
| |
| /* Check if a requested MAC already exists */ |
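	/* NOTE: mm_memcmp appears to return TRUE on a match (the opposite
	 * sense of libc memcmp), judging by its use throughout this file.
	 */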
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, |
| struct ecore_vlan_mac_registry_elem) |
| if (mm_memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) && |
| (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) |
| return ECORE_EXISTS; |
| |
| return ECORE_SUCCESS; |
| } |
| |
| static int ecore_check_vlan_add(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| union ecore_classification_ramrod_data *data) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| |
| ECORE_MSG(pdev, "Checking VLAN %d for ADD command\n", data->vlan.vlan); |
| |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, |
| struct ecore_vlan_mac_registry_elem) |
| if (data->vlan.vlan == pos->u.vlan.vlan) |
| return ECORE_EXISTS; |
| |
| return ECORE_SUCCESS; |
| } |
| |
| static int ecore_check_vlan_mac_add(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| union ecore_classification_ramrod_data *data) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| |
	ECORE_MSG(pdev, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
		  data->vlan_mac.mac[0], data->vlan_mac.mac[1],
		  data->vlan_mac.mac[2], data->vlan_mac.mac[3],
		  data->vlan_mac.mac[4], data->vlan_mac.mac[5],
		  data->vlan_mac.vlan);
| |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, |
| struct ecore_vlan_mac_registry_elem) |
| if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && |
| (mm_memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, |
| ETH_ALEN)) && |
| (data->vlan_mac.is_inner_mac == |
| pos->u.vlan_mac.is_inner_mac)) |
| return ECORE_EXISTS; |
| |
| return ECORE_SUCCESS; |
| } |
| |
| /* check_del() callbacks */ |
| static struct ecore_vlan_mac_registry_elem * |
| ecore_check_mac_del(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| union ecore_classification_ramrod_data *data) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| |
	ECORE_MSG(pdev, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
| |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, |
| struct ecore_vlan_mac_registry_elem) |
| if ((mm_memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) && |
| (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) |
| return pos; |
| |
| return NULL; |
| } |
| |
| static struct ecore_vlan_mac_registry_elem * |
| ecore_check_vlan_del(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| union ecore_classification_ramrod_data *data) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| |
| ECORE_MSG(pdev, "Checking VLAN %d for DEL command\n", data->vlan.vlan); |
| |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, |
| struct ecore_vlan_mac_registry_elem) |
| if (data->vlan.vlan == pos->u.vlan.vlan) |
| return pos; |
| |
| return NULL; |
| } |
| |
| static struct ecore_vlan_mac_registry_elem * |
| ecore_check_vlan_mac_del(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| union ecore_classification_ramrod_data *data) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| |
	ECORE_MSG(pdev, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
		  data->vlan_mac.mac[0], data->vlan_mac.mac[1],
		  data->vlan_mac.mac[2], data->vlan_mac.mac[3],
		  data->vlan_mac.mac[4], data->vlan_mac.mac[5],
		  data->vlan_mac.vlan);
| |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, |
| struct ecore_vlan_mac_registry_elem) |
| if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && |
| (mm_memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, |
| ETH_ALEN)) && |
| (data->vlan_mac.is_inner_mac == |
| pos->u.vlan_mac.is_inner_mac)) |
| return pos; |
| |
| return NULL; |
| } |
| |
| /* check_move() callback */ |
| static BOOL ecore_check_move(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *src_o, |
| struct ecore_vlan_mac_obj *dst_o, |
| union ecore_classification_ramrod_data *data) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| int rc; |
| |
| /* Check if we can delete the requested configuration from the first |
| * object. |
| */ |
| pos = src_o->check_del(pdev, src_o, data); |
| |
| /* check if configuration can be added */ |
| rc = dst_o->check_add(pdev, dst_o, data); |
| |
	/* If this classification cannot be added (it is already set)
	 * or cannot be deleted - return an error.
	 */
| if (rc || !pos) |
| return FALSE; |
| |
| return TRUE; |
| } |
| |
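/* check_move() stub for objects that never allow MOVE - presumably wired in
 * for paths such as 57710/57711, which do not support the MOVE command (see
 * the note in ecore_set_one_mac_e1x() below).
 */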
| static BOOL ecore_check_move_always_err( |
| struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *src_o, |
| struct ecore_vlan_mac_obj *dst_o, |
| union ecore_classification_ramrod_data *data) |
| { |
| return FALSE; |
| } |
| |
| static INLINE u8 ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o) |
| { |
| struct ecore_raw_obj *raw = &o->raw; |
| u8 rx_tx_flag = 0; |
| |
| if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || |
| (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) |
| rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; |
| |
| if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || |
| (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) |
| rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; |
| |
| return rx_tx_flag; |
| } |
| |
| void ecore_set_mac_in_nig(struct _lm_device_t *pdev, |
| BOOL add, unsigned char *dev_addr, int index) |
| { |
| u32 wb_data[2]; |
| u32 reg_offset = PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_MEM : |
| NIG_REG_LLH0_FUNC_MEM; |
| |
| if (!IS_MF_SI_MODE(pdev) && !IS_MF_AFEX(pdev)) |
| return; |
| |
| if (index > ECORE_LLH_CAM_MAX_PF_LINE) |
| return; |
| |
| ECORE_MSG(pdev, "Going to %s LLH configuration at entry %d\n", |
| (add ? "ADD" : "DELETE"), index); |
| |
| if (add) { |
| /* LLH_FUNC_MEM is a u64 WB register */ |
| reg_offset += 8*index; |
| |
| wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | |
| (dev_addr[4] << 8) | dev_addr[5]); |
| wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); |
| |
| REG_WR_DMAE_LEN(pdev, reg_offset, wb_data, 2); |
| } |
| |
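	/* Toggle the per-entry enable bit; on DELETE the stale CAM data is
	 * left in place (the disabled entry should simply be ignored).
	 */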
| REG_WR(pdev, (PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : |
| NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); |
| } |
| |
| /** |
| * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod |
| * |
| * @pdev: device handle |
| * @o: queue for which we want to configure this rule |
| * @add: if TRUE the command is an ADD command, DEL otherwise |
| * @opcode: CLASSIFY_RULE_OPCODE_XXX |
| * @hdr: pointer to a header to setup |
| * |
| */ |
| static INLINE void ecore_vlan_mac_set_cmd_hdr_e2(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, BOOL add, int opcode, |
| struct eth_classify_cmd_header *hdr) |
| { |
| struct ecore_raw_obj *raw = &o->raw; |
| |
| hdr->client_id = raw->cl_id; |
| hdr->func_id = raw->func_id; |
| |
	/* Rx and/or Tx (internal switching) configuration? */
| hdr->cmd_general_data |= |
| ecore_vlan_mac_get_rx_tx_flag(o); |
| |
| if (add) |
| hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; |
| |
| hdr->cmd_general_data |= |
| (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); |
| } |
| |
| /** |
| * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header |
| * |
| * @cid: connection id |
| * @type: ECORE_FILTER_XXX_PENDING |
| * @hdr: pointer to header to setup |
| * @rule_cnt: |
| * |
| * currently we always configure one rule and echo field to contain a CID and an |
| * opcode type. |
| */ |
| static INLINE void ecore_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, |
| struct eth_classify_header *hdr, int rule_cnt) |
| { |
| hdr->echo = mm_cpu_to_le32((cid & ECORE_SWCID_MASK) | |
| (type << ECORE_SWCID_SHIFT)); |
| hdr->rule_cnt = (u8)rule_cnt; |
| } |
| |
| /* hw_config() callbacks */ |
| static void ecore_set_one_mac_e2(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| struct ecore_exeq_elem *elem, int rule_idx, |
| int cam_offset) |
| { |
| struct ecore_raw_obj *raw = &o->raw; |
| struct eth_classify_rules_ramrod_data *data = |
| (struct eth_classify_rules_ramrod_data *)(raw->rdata); |
| int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; |
| union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; |
| BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; |
| unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; |
| u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; |
| |
| /* Set LLH CAM entry: currently only iSCSI and ETH macs are |
| * relevant. In addition, current implementation is tuned for a |
| * single ETH MAC. |
| * |
	 * When a PF must configure multiple unicast ETH MACs in switch
	 * independent mode (NetQ, multiple netdev MACs, etc.), consider
	 * better utilisation of the 8 per-function MAC entries in the
	 * LLH register. There are also NIG_REG_P[01]_LLH_FUNC_MEM2
	 * registers that bring the total number of CAM entries to 16.
| * |
| * Currently we won't configure NIG for MACs other than a primary ETH |
| * MAC and iSCSI L2 MAC. |
| * |
| * If this MAC is moving from one Queue to another, no need to change |
| * NIG configuration. |
| */ |
| if (cmd != ECORE_VLAN_MAC_MOVE) { |
| if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags)) |
| ecore_set_mac_in_nig(pdev, add, mac, |
| ECORE_LLH_CAM_ISCSI_ETH_LINE); |
| else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags)) |
| ecore_set_mac_in_nig(pdev, add, mac, |
| ECORE_LLH_CAM_ETH_LINE); |
| } |
| |
| /* Reset the ramrod data buffer for the first rule */ |
| if (rule_idx == 0) |
| mm_memset(data, 0, sizeof(*data)); |
| |
| /* Setup a command header */ |
| ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_MAC, |
| &rule_entry->mac.header); |
| |
| ECORE_MSG(pdev, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n", |
| (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id); |
| |
| /* Set a MAC itself */ |
| ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, |
| &rule_entry->mac.mac_mid, |
| &rule_entry->mac.mac_lsb, mac); |
| rule_entry->mac.inner_mac = |
| mm_cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac); |
| |
| /* MOVE: Add a rule that will add this MAC to the target Queue */ |
| if (cmd == ECORE_VLAN_MAC_MOVE) { |
| rule_entry++; |
| rule_cnt++; |
| |
| /* Setup ramrod data */ |
| ecore_vlan_mac_set_cmd_hdr_e2(pdev, |
| elem->cmd_data.vlan_mac.target_obj, |
| TRUE, CLASSIFY_RULE_OPCODE_MAC, |
| &rule_entry->mac.header); |
| |
| /* Set a MAC itself */ |
| ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, |
| &rule_entry->mac.mac_mid, |
| &rule_entry->mac.mac_lsb, mac); |
| rule_entry->mac.inner_mac = |
| mm_cpu_to_le16(elem->cmd_data.vlan_mac. |
| u.mac.is_inner_mac); |
| } |
| |
| /* Set the ramrod data header */ |
| /* TODO: take this to the higher level in order to prevent multiple |
| writing */ |
| ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, |
| rule_cnt); |
| } |
| |
| /** |
| * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod |
| * |
| * @pdev: device handle |
| * @o: queue |
| * @type: |
| * @cam_offset: offset in cam memory |
| * @hdr: pointer to a header to setup |
| * |
| * E1/E1H |
| */ |
| static INLINE void ecore_vlan_mac_set_rdata_hdr_e1x(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, int type, int cam_offset, |
| struct mac_configuration_hdr *hdr) |
| { |
| struct ecore_raw_obj *r = &o->raw; |
| |
| hdr->length = 1; |
| hdr->offset = (u8)cam_offset; |
| hdr->client_id = mm_cpu_to_le16(0xff); |
| hdr->echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) | |
| (type << ECORE_SWCID_SHIFT)); |
| } |
| |
| static INLINE void ecore_vlan_mac_set_cfg_entry_e1x(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, BOOL add, int opcode, u8 *mac, |
| u16 vlan_id, struct mac_configuration_entry *cfg_entry) |
| { |
| struct ecore_raw_obj *r = &o->raw; |
| u32 cl_bit_vec = (1 << r->cl_id); |
| |
| cfg_entry->clients_bit_vector = mm_cpu_to_le32(cl_bit_vec); |
| cfg_entry->pf_id = r->func_id; |
| cfg_entry->vlan_id = mm_cpu_to_le16(vlan_id); |
| |
| if (add) { |
| ECORE_SET_FLAG(cfg_entry->flags, |
| MAC_CONFIGURATION_ENTRY_ACTION_TYPE, |
| T_ETH_MAC_COMMAND_SET); |
| ECORE_SET_FLAG(cfg_entry->flags, |
| MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, |
| opcode); |
| |
| /* Set a MAC in a ramrod data */ |
| ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr, |
| &cfg_entry->middle_mac_addr, |
| &cfg_entry->lsb_mac_addr, mac); |
| } else |
| ECORE_SET_FLAG(cfg_entry->flags, |
| MAC_CONFIGURATION_ENTRY_ACTION_TYPE, |
| T_ETH_MAC_COMMAND_INVALIDATE); |
| } |
| |
| static INLINE void ecore_vlan_mac_set_rdata_e1x(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, int type, int cam_offset, BOOL add, |
| u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) |
| { |
| struct mac_configuration_entry *cfg_entry = &config->config_table[0]; |
| struct ecore_raw_obj *raw = &o->raw; |
| |
| ecore_vlan_mac_set_rdata_hdr_e1x(pdev, o, type, cam_offset, |
| &config->hdr); |
| ecore_vlan_mac_set_cfg_entry_e1x(pdev, o, add, opcode, mac, vlan_id, |
| cfg_entry); |
| |
| ECORE_MSG(pdev, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n", |
| (add ? "setting" : "clearing"), |
| mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset); |
| } |
| |
| /** |
| * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data |
| * |
| * @pdev: device handle |
| * @o: ecore_vlan_mac_obj |
| * @elem: ecore_exeq_elem |
| * @rule_idx: rule_idx |
| * @cam_offset: cam_offset |
| */ |
| static void ecore_set_one_mac_e1x(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| struct ecore_exeq_elem *elem, int rule_idx, |
| int cam_offset) |
| { |
| struct ecore_raw_obj *raw = &o->raw; |
| struct mac_configuration_cmd *config = |
| (struct mac_configuration_cmd *)(raw->rdata); |
| /* 57710 and 57711 do not support MOVE command, |
| * so it's either ADD or DEL |
| */ |
| BOOL add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? |
| TRUE : FALSE; |
| |
| /* Reset the ramrod data buffer */ |
| mm_memset(config, 0, sizeof(*config)); |
| |
| ecore_vlan_mac_set_rdata_e1x(pdev, o, raw->state, |
| cam_offset, add, |
| elem->cmd_data.vlan_mac.u.mac.mac, 0, |
| ETH_VLAN_FILTER_ANY_VLAN, config); |
| } |
| |
| static void ecore_set_one_vlan_e2(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| struct ecore_exeq_elem *elem, int rule_idx, |
| int cam_offset) |
| { |
| struct ecore_raw_obj *raw = &o->raw; |
| struct eth_classify_rules_ramrod_data *data = |
| (struct eth_classify_rules_ramrod_data *)(raw->rdata); |
| int rule_cnt = rule_idx + 1; |
| union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; |
| enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; |
| BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; |
| u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; |
| |
| /* Reset the ramrod data buffer for the first rule */ |
| if (rule_idx == 0) |
| mm_memset(data, 0, sizeof(*data)); |
| |
| /* Set a rule header */ |
| ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_VLAN, |
| &rule_entry->vlan.header); |
| |
| ECORE_MSG(pdev, "About to %s VLAN %d\n", (add ? "add" : "delete"), |
| vlan); |
| |
| /* Set a VLAN itself */ |
| rule_entry->vlan.vlan = mm_cpu_to_le16(vlan); |
| |
	/* MOVE: Add a rule that will add this VLAN to the target Queue */
| if (cmd == ECORE_VLAN_MAC_MOVE) { |
| rule_entry++; |
| rule_cnt++; |
| |
| /* Setup ramrod data */ |
| ecore_vlan_mac_set_cmd_hdr_e2(pdev, |
| elem->cmd_data.vlan_mac.target_obj, |
| TRUE, CLASSIFY_RULE_OPCODE_VLAN, |
| &rule_entry->vlan.header); |
| |
| /* Set a VLAN itself */ |
| rule_entry->vlan.vlan = mm_cpu_to_le16(vlan); |
| } |
| |
| /* Set the ramrod data header */ |
| /* TODO: take this to the higher level in order to prevent multiple |
| writing */ |
| ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, |
| rule_cnt); |
| } |
| |
| static void ecore_set_one_vlan_mac_e2(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| struct ecore_exeq_elem *elem, |
| int rule_idx, int cam_offset) |
| { |
| struct ecore_raw_obj *raw = &o->raw; |
| struct eth_classify_rules_ramrod_data *data = |
| (struct eth_classify_rules_ramrod_data *)(raw->rdata); |
| int rule_cnt = rule_idx + 1; |
| union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; |
| enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; |
| BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; |
| u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; |
| u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; |
| |
| /* Reset the ramrod data buffer for the first rule */ |
| if (rule_idx == 0) |
| mm_memset(data, 0, sizeof(*data)); |
| |
| /* Set a rule header */ |
| ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_PAIR, |
| &rule_entry->pair.header); |
| |
| /* Set VLAN and MAC themselves */ |
| rule_entry->pair.vlan = mm_cpu_to_le16(vlan); |
| ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb, |
| &rule_entry->pair.mac_mid, |
| &rule_entry->pair.mac_lsb, mac); |
| rule_entry->pair.inner_mac = |
| elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac; |
	/* MOVE: Add a rule that will add this VLAN-MAC pair to the target Queue */
| if (cmd == ECORE_VLAN_MAC_MOVE) { |
| rule_entry++; |
| rule_cnt++; |
| |
| /* Setup ramrod data */ |
| ecore_vlan_mac_set_cmd_hdr_e2(pdev, |
| elem->cmd_data.vlan_mac.target_obj, |
| TRUE, CLASSIFY_RULE_OPCODE_PAIR, |
| &rule_entry->pair.header); |
| |
		/* Set VLAN and MAC themselves */
| rule_entry->pair.vlan = mm_cpu_to_le16(vlan); |
| ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb, |
| &rule_entry->pair.mac_mid, |
| &rule_entry->pair.mac_lsb, mac); |
| rule_entry->pair.inner_mac = |
| elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac; |
| } |
| |
| /* Set the ramrod data header */ |
| /* TODO: take this to the higher level in order to prevent multiple |
| writing */ |
| ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, |
| rule_cnt); |
| } |
| |
| /** |
| * ecore_set_one_vlan_mac_e1h - |
| * |
| * @pdev: device handle |
| * @o: ecore_vlan_mac_obj |
| * @elem: ecore_exeq_elem |
| * @rule_idx: rule_idx |
| * @cam_offset: cam_offset |
| */ |
| static void ecore_set_one_vlan_mac_e1h(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| struct ecore_exeq_elem *elem, |
| int rule_idx, int cam_offset) |
| { |
| struct ecore_raw_obj *raw = &o->raw; |
| struct mac_configuration_cmd *config = |
| (struct mac_configuration_cmd *)(raw->rdata); |
| /* 57710 and 57711 do not support MOVE command, |
| * so it's either ADD or DEL |
| */ |
| BOOL add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? |
| TRUE : FALSE; |
| |
| /* Reset the ramrod data buffer */ |
| mm_memset(config, 0, sizeof(*config)); |
| |
| ecore_vlan_mac_set_rdata_e1x(pdev, o, ECORE_FILTER_VLAN_MAC_PENDING, |
| cam_offset, add, |
| elem->cmd_data.vlan_mac.u.vlan_mac.mac, |
| elem->cmd_data.vlan_mac.u.vlan_mac.vlan, |
| ETH_VLAN_FILTER_CLASSIFY, config); |
| } |
| |
| #define list_next_entry(pos, member) \ |
| list_entry((pos)->member.next, typeof(*(pos)), member) |
| |
| /** |
| * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element |
| * |
| * @pdev: device handle |
| * @p: command parameters |
| * @ppos: pointer to the cookie |
| * |
| * reconfigure next MAC/VLAN/VLAN-MAC element from the |
| * previously configured elements list. |
| * |
| * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken |
| * into an account |
| * |
| * pointer to the cookie - that should be given back in the next call to make |
| * function handle the next element. If *ppos is set to NULL it will restart the |
| * iterator. If returned *ppos == NULL this means that the last element has been |
| * handled. |
| * |
| */ |
| static int ecore_vlan_mac_restore(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_ramrod_params *p, |
| struct ecore_vlan_mac_registry_elem **ppos) |
| { |
| struct ecore_vlan_mac_registry_elem *pos; |
| struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; |
| |
| /* If list is empty - there is nothing to do here */ |
| if (ECORE_LIST_IS_EMPTY(&o->head)) { |
| *ppos = NULL; |
| return 0; |
| } |
| |
| /* make a step... */ |
| if (*ppos == NULL) |
| *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, |
| struct ecore_vlan_mac_registry_elem, |
| link); |
| else |
| *ppos = ECORE_LIST_NEXT(*ppos, link, |
| struct ecore_vlan_mac_registry_elem); |
| |
| pos = *ppos; |
| |
| /* If it's the last step - return NULL */ |
| if (ECORE_LIST_IS_LAST(&pos->link, &o->head)) |
| *ppos = NULL; |
| |
| /* Prepare a 'user_req' */ |
| mm_memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); |
| |
| /* Set the command */ |
| p->user_req.cmd = ECORE_VLAN_MAC_ADD; |
| |
| /* Set vlan_mac_flags */ |
| p->user_req.vlan_mac_flags = pos->vlan_mac_flags; |
| |
| /* Set a restore bit */ |
| ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags); |
| |
| return ecore_config_vlan_mac(pdev, p); |
| } |
| |
| /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a |
| * pointer to an element with a specific criteria and NULL if such an element |
| * hasn't been found. |
| */ |
| static struct ecore_exeq_elem *ecore_exeq_get_mac( |
| struct ecore_exe_queue_obj *o, |
| struct ecore_exeq_elem *elem) |
| { |
| struct ecore_exeq_elem *pos; |
| struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; |
| |
| /* Check pending for execution commands */ |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, |
| struct ecore_exeq_elem) |
| if (mm_memcmp(&pos->cmd_data.vlan_mac.u.mac, data, |
| sizeof(*data)) && |
| (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) |
| return pos; |
| |
| return NULL; |
| } |
| |
| static struct ecore_exeq_elem *ecore_exeq_get_vlan( |
| struct ecore_exe_queue_obj *o, |
| struct ecore_exeq_elem *elem) |
| { |
| struct ecore_exeq_elem *pos; |
| struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; |
| |
| /* Check pending for execution commands */ |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, |
| struct ecore_exeq_elem) |
| if (mm_memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, |
| sizeof(*data)) && |
| (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) |
| return pos; |
| |
| return NULL; |
| } |
| |
| static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac( |
| struct ecore_exe_queue_obj *o, |
| struct ecore_exeq_elem *elem) |
| { |
| struct ecore_exeq_elem *pos; |
| struct ecore_vlan_mac_ramrod_data *data = |
| &elem->cmd_data.vlan_mac.u.vlan_mac; |
| |
| /* Check pending for execution commands */ |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, |
| struct ecore_exeq_elem) |
| if (mm_memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, |
| sizeof(*data)) && |
| (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) |
| return pos; |
| |
| return NULL; |
| } |
| |
| /** |
| * ecore_validate_vlan_mac_add - check if an ADD command can be executed |
| * |
| * @pdev: device handle |
| * @qo: ecore_qable_obj |
| * @elem: ecore_exeq_elem |
| * |
| * Checks that the requested configuration can be added. If yes and if |
| * requested, consume CAM credit. |
| * |
| * The 'validate' is run after the 'optimize'. |
| * |
| */ |
| static INLINE int ecore_validate_vlan_mac_add(struct _lm_device_t *pdev, |
| union ecore_qable_obj *qo, |
| struct ecore_exeq_elem *elem) |
| { |
| struct ecore_vlan_mac_obj *o = &qo->vlan_mac; |
| struct ecore_exe_queue_obj *exeq = &o->exe_queue; |
| int rc; |
| |
| /* Check the registry */ |
| rc = o->check_add(pdev, o, &elem->cmd_data.vlan_mac.u); |
| if (rc) { |
| ECORE_MSG(pdev, "ADD command is not allowed considering current registry state.\n"); |
| return rc; |
| } |
| |
| /* Check if there is a pending ADD command for this |
| * MAC/VLAN/VLAN-MAC. Return an error if there is. |
| */ |
| if (exeq->get(exeq, elem)) { |
| ECORE_MSG(pdev, "There is a pending ADD command already\n"); |
| return ECORE_EXISTS; |
| } |
| |
| /* TODO: Check the pending MOVE from other objects where this |
| * object is a destination object. |
| */ |
| |
	/* Consume the credit unless explicitly asked not to */
| if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, |
| &elem->cmd_data.vlan_mac.vlan_mac_flags) || |
| o->get_credit(o))) |
| return ECORE_INVAL; |
| |
| return ECORE_SUCCESS; |
| } |
| |
| /** |
| * ecore_validate_vlan_mac_del - check if the DEL command can be executed |
| * |
| * @pdev: device handle |
| * @qo: quable object to check |
| * @elem: element that needs to be deleted |
| * |
| * Checks that the requested configuration can be deleted. If yes and if |
| * requested, returns a CAM credit. |
| * |
| * The 'validate' is run after the 'optimize'. |
| */ |
| static INLINE int ecore_validate_vlan_mac_del(struct _lm_device_t *pdev, |
| union ecore_qable_obj *qo, |
| struct ecore_exeq_elem *elem) |
| { |
| struct ecore_vlan_mac_obj *o = &qo->vlan_mac; |
| struct ecore_vlan_mac_registry_elem *pos; |
| struct ecore_exe_queue_obj *exeq = &o->exe_queue; |
| struct ecore_exeq_elem query_elem; |
| |
	/* If this classification cannot be deleted (it doesn't exist)
	 * - return ECORE_EXISTS.
	 */
| pos = o->check_del(pdev, o, &elem->cmd_data.vlan_mac.u); |
| if (!pos) { |
| ECORE_MSG(pdev, "DEL command is not allowed considering current registry state\n"); |
| return ECORE_EXISTS; |
| } |
| |
| /* Check if there are pending DEL or MOVE commands for this |
| * MAC/VLAN/VLAN-MAC. Return an error if so. |
| */ |
| mm_memcpy(&query_elem, elem, sizeof(query_elem)); |
| |
| /* Check for MOVE commands */ |
| query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE; |
| if (exeq->get(exeq, &query_elem)) { |
| ECORE_ERR("There is a pending MOVE command already\n"); |
| return ECORE_INVAL; |
| } |
| |
| /* Check for DEL commands */ |
| if (exeq->get(exeq, elem)) { |
| ECORE_MSG(pdev, "There is a pending DEL command already\n"); |
| return ECORE_EXISTS; |
| } |
| |
	/* Return the credit to the credit pool unless explicitly asked not to */
| if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, |
| &elem->cmd_data.vlan_mac.vlan_mac_flags) || |
| o->put_credit(o))) { |
| ECORE_ERR("Failed to return a credit\n"); |
| return ECORE_INVAL; |
| } |
| |
| return ECORE_SUCCESS; |
| } |
| |
| /** |
| * ecore_validate_vlan_mac_move - check if the MOVE command can be executed |
| * |
| * @pdev: device handle |
| * @qo: quable object to check (source) |
| * @elem: element that needs to be moved |
| * |
| * Checks that the requested configuration can be moved. If yes and if |
| * requested, returns a CAM credit. |
| * |
| * The 'validate' is run after the 'optimize'. |
| */ |
| static INLINE int ecore_validate_vlan_mac_move(struct _lm_device_t *pdev, |
| union ecore_qable_obj *qo, |
| struct ecore_exeq_elem *elem) |
| { |
| struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac; |
| struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; |
| struct ecore_exeq_elem query_elem; |
| struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue; |
| struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue; |
| |
| /* Check if we can perform this operation based on the current registry |
| * state. |
| */ |
| if (!src_o->check_move(pdev, src_o, dest_o, |
| &elem->cmd_data.vlan_mac.u)) { |
| ECORE_MSG(pdev, "MOVE command is not allowed considering current registry state\n"); |
| return ECORE_INVAL; |
| } |
| |
| /* Check if there is an already pending DEL or MOVE command for the |
| * source object or ADD command for a destination object. Return an |
| * error if so. |
| */ |
| mm_memcpy(&query_elem, elem, sizeof(query_elem)); |
| |
| /* Check DEL on source */ |
| query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; |
| if (src_exeq->get(src_exeq, &query_elem)) { |
| ECORE_ERR("There is a pending DEL command on the source queue already\n"); |
| return ECORE_INVAL; |
| } |
| |
| /* Check MOVE on source */ |
| if (src_exeq->get(src_exeq, elem)) { |
| ECORE_MSG(pdev, "There is a pending MOVE command already\n"); |
| return ECORE_EXISTS; |
| } |
| |
| /* Check ADD on destination */ |
| query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; |
| if (dest_exeq->get(dest_exeq, &query_elem)) { |
| ECORE_ERR("There is a pending ADD command on the destination queue already\n"); |
| return ECORE_INVAL; |
| } |
| |
	/* Consume the credit unless explicitly asked not to */
| if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST, |
| &elem->cmd_data.vlan_mac.vlan_mac_flags) || |
| dest_o->get_credit(dest_o))) |
| return ECORE_INVAL; |
| |
| if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, |
| &elem->cmd_data.vlan_mac.vlan_mac_flags) || |
| src_o->put_credit(src_o))) { |
| /* return the credit taken from dest... */ |
| dest_o->put_credit(dest_o); |
| return ECORE_INVAL; |
| } |
| |
| return ECORE_SUCCESS; |
| } |
| |
| static int ecore_validate_vlan_mac(struct _lm_device_t *pdev, |
| union ecore_qable_obj *qo, |
| struct ecore_exeq_elem *elem) |
| { |
| switch (elem->cmd_data.vlan_mac.cmd) { |
| case ECORE_VLAN_MAC_ADD: |
| return ecore_validate_vlan_mac_add(pdev, qo, elem); |
| case ECORE_VLAN_MAC_DEL: |
| return ecore_validate_vlan_mac_del(pdev, qo, elem); |
| case ECORE_VLAN_MAC_MOVE: |
| return ecore_validate_vlan_mac_move(pdev, qo, elem); |
| default: |
| return ECORE_INVAL; |
| } |
| } |
| |
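/* Give back (ADD/MOVE) or re-take (DEL) the CAM credit when a queued
 * command is dropped before execution, mirroring the accounting done by
 * the validate callbacks above.
 */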
| static int ecore_remove_vlan_mac(struct _lm_device_t *pdev, |
| union ecore_qable_obj *qo, |
| struct ecore_exeq_elem *elem) |
| { |
| int rc = 0; |
| |
| /* If consumption wasn't required, nothing to do */ |
| if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, |
| &elem->cmd_data.vlan_mac.vlan_mac_flags)) |
| return ECORE_SUCCESS; |
| |
| switch (elem->cmd_data.vlan_mac.cmd) { |
| case ECORE_VLAN_MAC_ADD: |
| case ECORE_VLAN_MAC_MOVE: |
| rc = qo->vlan_mac.put_credit(&qo->vlan_mac); |
| break; |
| case ECORE_VLAN_MAC_DEL: |
| rc = qo->vlan_mac.get_credit(&qo->vlan_mac); |
| break; |
| default: |
| return ECORE_INVAL; |
| } |
| |
| if (rc != TRUE) |
| return ECORE_INVAL; |
| |
| return ECORE_SUCCESS; |
| } |
| |
| /** |
| * ecore_wait_vlan_mac - passively wait up to ~5 seconds until all work completes. |
| * |
| * @pdev: device handle |
| * @o: ecore_vlan_mac_obj |
| * |
| */ |
| static int ecore_wait_vlan_mac(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o) |
| { |
| int cnt = 5000, rc; |
| struct ecore_exe_queue_obj *exeq = &o->exe_queue; |
| struct ecore_raw_obj *raw = &o->raw; |
| |
| while (cnt--) { |
| /* Wait for the current command to complete */ |
| rc = raw->wait_comp(pdev, raw); |
| if (rc) |
| return rc; |
| |
| /* Wait until there are no pending commands */ |
| if (!ecore_exe_queue_empty(exeq)) |
| mm_wait(pdev, 1000); |
| else |
| return ECORE_SUCCESS; |
| } |
| |
| return ECORE_TIMEOUT; |
| } |
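| |
| /* Timing sketch for the loop above, assuming mm_wait() sleeps in |
| * microseconds: 5000 iterations x 1000 us = ~5 s worst case, plus |
| * whatever raw->wait_comp() itself spends per iteration. |
| */ |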
| |
| static int __ecore_vlan_mac_execute_step(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| unsigned long *ramrod_flags) |
| { |
| int rc = ECORE_SUCCESS; |
| |
| ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); |
| |
| ECORE_MSG(pdev, "vlan_mac_execute_step - trying to take writer lock\n"); |
| rc = __ecore_vlan_mac_h_write_trylock(pdev, o); |
| |
| if (rc != ECORE_SUCCESS) { |
| __ecore_vlan_mac_h_pend(pdev, o, *ramrod_flags); |
| |
| /* The calling function should not differentiate between this case |
| * and the case in which there is already a pending ramrod. |
| */ |
| rc = ECORE_PENDING; |
| } else { |
| rc = ecore_exe_queue_step(pdev, &o->exe_queue, ramrod_flags); |
| } |
| ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); |
| |
| return rc; |
| } |
| |
| /** |
| * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod |
| * |
| * @pdev: device handle |
| * @o: ecore_vlan_mac_obj |
| * @cqe: completion element from the event ring |
| * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk |
| * |
| */ |
| static int ecore_complete_vlan_mac(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| union event_ring_elem *cqe, |
| unsigned long *ramrod_flags) |
| { |
| struct ecore_raw_obj *r = &o->raw; |
| int rc; |
| |
| /* Clearing the pending list and the raw state must be done atomically |
| * (the execution flow assumes they represent the same state). |
| */ |
| ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); |
| |
| /* Reset pending list */ |
| __ecore_exe_queue_reset_pending(pdev, &o->exe_queue); |
| |
| /* Clear pending */ |
| r->clear_pending(r); |
| |
| ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); |
| |
| /* If ramrod failed this is most likely a SW bug */ |
| if (cqe->message.error) |
| return ECORE_INVAL; |
| |
| /* Run the next bulk of pending commands if requested */ |
| if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) { |
| rc = __ecore_vlan_mac_execute_step(pdev, o, ramrod_flags); |
| if (rc < 0) |
| return rc; |
| } |
| |
| /* If there is more work to do return PENDING */ |
| if (!ecore_exe_queue_empty(&o->exe_queue)) |
| return ECORE_PENDING; |
| |
| return ECORE_SUCCESS; |
| } |
| |
| /** |
| * ecore_optimize_vlan_mac - optimize ADD and DEL commands. |
| * |
| * @pdev: device handle |
| * @o: ecore_qable_obj |
| * @elem: ecore_exeq_elem |
| */ |
| static int ecore_optimize_vlan_mac(struct _lm_device_t *pdev, |
| union ecore_qable_obj *qo, |
| struct ecore_exeq_elem *elem) |
| { |
| struct ecore_exeq_elem query, *pos; |
| struct ecore_vlan_mac_obj *o = &qo->vlan_mac; |
| struct ecore_exe_queue_obj *exeq = &o->exe_queue; |
| |
| mm_memcpy(&query, elem, sizeof(query)); |
| |
| switch (elem->cmd_data.vlan_mac.cmd) { |
| case ECORE_VLAN_MAC_ADD: |
| query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; |
| break; |
| case ECORE_VLAN_MAC_DEL: |
| query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; |
| break; |
| default: |
| /* Don't handle anything other than ADD or DEL */ |
| return 0; |
| } |
| |
| /* If we found the appropriate element - delete it */ |
| pos = exeq->get(exeq, &query); |
| if (pos) { |
| |
| /* Return the credit of the optimized command */ |
| if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, |
| &pos->cmd_data.vlan_mac.vlan_mac_flags)) { |
| if ((query.cmd_data.vlan_mac.cmd == |
| ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) { |
| ECORE_ERR("Failed to return the credit for the optimized ADD command\n"); |
| return ECORE_INVAL; |
| } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ |
| ECORE_ERR("Failed to recover the credit from the optimized DEL command\n"); |
| return ECORE_INVAL; |
| } |
| } |
| |
| ECORE_MSG(pdev, "Optimizing %s command\n", |
| (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? |
| "ADD" : "DEL"); |
| |
| ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue); |
| ecore_exe_queue_free_elem(pdev, pos); |
| return 1; |
| } |
| |
| return 0; |
| } |
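| |
| /* Worked example of the optimization above (queue contents are |
| * hypothetical): if the execution queue already holds a pending |
| * ADD(MAC X) when a DEL(MAC X) is submitted, the two cancel out: |
| * |
| * exeq: [ ADD(X) ] + DEL(X) -> exeq: [ ], credit returned |
| * exeq: [ DEL(X) ] + ADD(X) -> exeq: [ ], credit re-taken |
| * |
| * and neither command reaches the FW. |
| */ |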
| |
| /** |
| * ecore_vlan_mac_get_registry_elem - prepare a registry element |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object whose registry (CAM) is searched or extended |
| * @elem: execution queue element carrying the command |
| * @restore: TRUE if this is a restore flow (the element already exists) |
| * @re: output - the prepared registry element |
| * |
| * Prepares a registry element according to the current command request. |
| */ |
| static INLINE int ecore_vlan_mac_get_registry_elem( |
| struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| struct ecore_exeq_elem *elem, |
| BOOL restore, |
| struct ecore_vlan_mac_registry_elem **re) |
| { |
| enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; |
| struct ecore_vlan_mac_registry_elem *reg_elem; |
| |
| /* Allocate a new registry element if needed. */ |
| if (!restore && |
| ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) { |
| reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, pdev); |
| if (!reg_elem) |
| return ECORE_NOMEM; |
| |
| /* Get a new CAM offset */ |
| if (!o->get_cam_offset(o, ®_elem->cam_offset)) { |
| /* This shall never happen, because we have checked the |
| * CAM availability in the 'validate'. |
| */ |
| DbgBreakIf(1); |
| ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem)); |
| return ECORE_INVAL; |
| } |
| |
| ECORE_MSG(pdev, "Got cam offset %d\n", reg_elem->cam_offset); |
| |
| /* Set the VLAN-MAC data */ |
| mm_memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, |
| sizeof(reg_elem->u)); |
| |
| /* Copy the flags (needed for DEL and RESTORE flows) */ |
| reg_elem->vlan_mac_flags = |
| elem->cmd_data.vlan_mac.vlan_mac_flags; |
| } else /* DEL, RESTORE */ |
| reg_elem = o->check_del(pdev, o, &elem->cmd_data.vlan_mac.u); |
| |
| *re = reg_elem; |
| return ECORE_SUCCESS; |
| } |
| |
| /** |
| * ecore_execute_vlan_mac - execute vlan mac command |
| * |
| * @pdev: device handle |
| * @qo: qable object the execution queue belongs to |
| * @exe_chunk: chunk of commands to execute in a single ramrod |
| * @ramrod_flags: execution flags (RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY) |
| * |
| * go and send a ramrod! |
| */ |
| static int ecore_execute_vlan_mac(struct _lm_device_t *pdev, |
| union ecore_qable_obj *qo, |
| d_list_t *exe_chunk, |
| unsigned long *ramrod_flags) |
| { |
| struct ecore_exeq_elem *elem; |
| struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; |
| struct ecore_raw_obj *r = &o->raw; |
| int rc, idx = 0; |
| BOOL restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags); |
| BOOL drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags); |
| struct ecore_vlan_mac_registry_elem *reg_elem; |
| enum ecore_vlan_mac_cmd cmd; |
| |
| /* If DRIVER_ONLY execution is requested, clean up the registry |
| * and exit. Otherwise send a ramrod to the FW. |
| */ |
| if (!drv_only) { |
| DbgBreakIf(r->check_pending(r)); |
| |
| /* Set pending */ |
| r->set_pending(r); |
| |
| /* Fill the ramrod data */ |
| ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, |
| struct ecore_exeq_elem) { |
| cmd = elem->cmd_data.vlan_mac.cmd; |
| /* A MOVE command adds to the target object, so run the |
| * CAM search against that object instead. |
| */ |
| if (cmd == ECORE_VLAN_MAC_MOVE) |
| cam_obj = elem->cmd_data.vlan_mac.target_obj; |
| else |
| cam_obj = o; |
| |
| rc = ecore_vlan_mac_get_registry_elem(pdev, cam_obj, |
| elem, restore, |
| ®_elem); |
| if (rc) |
| goto error_exit; |
| |
| DbgBreakIf(!reg_elem); |
| |
| /* Push a new entry into the registry */ |
| if (!restore && |
| ((cmd == ECORE_VLAN_MAC_ADD) || |
| (cmd == ECORE_VLAN_MAC_MOVE))) |
| ECORE_LIST_PUSH_HEAD(®_elem->link, |
| &cam_obj->head); |
| |
| /* Configure a single command in a ramrod data buffer */ |
| o->set_one_rule(pdev, o, elem, idx, |
| reg_elem->cam_offset); |
| |
| /* MOVE command consumes 2 entries in the ramrod data */ |
| if (cmd == ECORE_VLAN_MAC_MOVE) |
| idx += 2; |
| else |
| idx++; |
| } |
| |
| /* No need for an explicit memory barrier here as long as we |
| * ensure the ordering of writing to the SPQ element |
| * and updating of the SPQ producer which involves a memory |
| * read. If the memory read is removed we will have to put a |
| * full memory barrier there (inside ecore_sp_post()). |
| */ |
| rc = ecore_sp_post(pdev, o->ramrod_cmd, r->cid, |
| r->rdata_mapping.as_u64, |
| ETH_CONNECTION_TYPE); |
| if (rc) |
| goto error_exit; |
| } |
| |
| /* Now, when we are done with the ramrod - clean up the registry */ |
| ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, |
| struct ecore_exeq_elem) { |
| cmd = elem->cmd_data.vlan_mac.cmd; |
| if ((cmd == ECORE_VLAN_MAC_DEL) || |
| (cmd == ECORE_VLAN_MAC_MOVE)) { |
| reg_elem = o->check_del(pdev, o, |
| &elem->cmd_data.vlan_mac.u); |
| |
| DbgBreakIf(!reg_elem); |
| |
| o->put_cam_offset(o, reg_elem->cam_offset); |
| ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head); |
| ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem)); |
| } |
| } |
| |
| if (!drv_only) |
| return ECORE_PENDING; |
| else |
| return ECORE_SUCCESS; |
| |
| error_exit: |
| r->clear_pending(r); |
| |
| /* Cleanup a registry in case of a failure */ |
| ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, |
| struct ecore_exeq_elem) { |
| cmd = elem->cmd_data.vlan_mac.cmd; |
| |
| if (cmd == ECORE_VLAN_MAC_MOVE) |
| cam_obj = elem->cmd_data.vlan_mac.target_obj; |
| else |
| cam_obj = o; |
| |
| /* Delete all the entries newly added above */ |
| if (!restore && |
| ((cmd == ECORE_VLAN_MAC_ADD) || |
| (cmd == ECORE_VLAN_MAC_MOVE))) { |
| reg_elem = o->check_del(pdev, cam_obj, |
| &elem->cmd_data.vlan_mac.u); |
| if (reg_elem) { |
| ECORE_LIST_REMOVE_ENTRY(®_elem->link, |
| &cam_obj->head); |
| ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem)); |
| } |
| } |
| } |
| |
| return rc; |
| } |
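| |
| /* Ramrod-data layout sketch for a mixed chunk (chunk contents are |
| * hypothetical; indices follow the idx accounting above): |
| * |
| * chunk = { ADD(A), MOVE(B), DEL(C) } |
| * rules[0] = ADD(A) // idx: 0 -> 1 |
| * rules[1..2] = MOVE(B) // MOVE consumes 2 entries, idx: 1 -> 3 |
| * rules[3] = DEL(C) // idx: 3 -> 4 |
| */ |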
| |
| static INLINE int ecore_vlan_mac_push_new_cmd( |
| struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_ramrod_params *p) |
| { |
| struct ecore_exeq_elem *elem; |
| struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; |
| BOOL restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags); |
| |
| /* Allocate the execution queue element */ |
| elem = ecore_exe_queue_alloc_elem(pdev); |
| if (!elem) |
| return ECORE_NOMEM; |
| |
| /* Set the command 'length' */ |
| switch (p->user_req.cmd) { |
| case ECORE_VLAN_MAC_MOVE: |
| elem->cmd_len = 2; |
| break; |
| default: |
| elem->cmd_len = 1; |
| } |
| |
| /* Fill the object specific info */ |
| mm_memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); |
| |
| /* Try to add a new command to the pending list */ |
| return ecore_exe_queue_add(pdev, &o->exe_queue, elem, restore); |
| } |
| |
| /** |
| * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. |
| * |
| * @pdev: device handle |
| * @p: ramrod parameters: the object, the user request and the flags |
| * |
| */ |
| int ecore_config_vlan_mac(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_ramrod_params *p) |
| { |
| int rc = ECORE_SUCCESS; |
| struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; |
| unsigned long *ramrod_flags = &p->ramrod_flags; |
| BOOL cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags); |
| struct ecore_raw_obj *raw = &o->raw; |
| |
| /* |
| * Add new elements to the execution list for commands that require it. |
| */ |
| if (!cont) { |
| rc = ecore_vlan_mac_push_new_cmd(pdev, p); |
| if (rc) |
| return rc; |
| } |
| |
| /* If nothing further will be executed in this iteration, return |
| * PENDING when there are still pending commands. |
| */ |
| if (!ecore_exe_queue_empty(&o->exe_queue)) |
| rc = ECORE_PENDING; |
| |
| if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { |
| ECORE_MSG(pdev, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n"); |
| raw->clear_pending(raw); |
| } |
| |
| /* Execute commands if required */ |
| if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) || |
| ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) { |
| rc = __ecore_vlan_mac_execute_step(pdev, p->vlan_mac_obj, |
| &p->ramrod_flags); |
| if (rc < 0) |
| return rc; |
| } |
| |
| /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it is set, |
| * the user wants to wait until the last command completes. |
| */ |
| if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { |
| /* Wait for at most the current exe_queue length plus one |
| * iterations (the extra one covers the currently pending command). |
| */ |
| int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1; |
| |
| while (!ecore_exe_queue_empty(&o->exe_queue) && |
| max_iterations--) { |
| |
| /* Wait for the current command to complete */ |
| rc = raw->wait_comp(pdev, raw); |
| if (rc) |
| return rc; |
| |
| /* Make a next step */ |
| rc = __ecore_vlan_mac_execute_step(pdev, |
| p->vlan_mac_obj, |
| &p->ramrod_flags); |
| if (rc < 0) |
| return rc; |
| } |
| |
| return ECORE_SUCCESS; |
| } |
| |
| return rc; |
| } |
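| |
| /* Usage sketch (a hypothetical caller; assumes the union member names |
| * used by the MAC flavour of set_one_rule, and a mac_obj initialized |
| * via ecore_init_mac_obj): synchronously adding one MAC: |
| * |
| * struct ecore_vlan_mac_ramrod_params p; |
| * |
| * mm_memset(&p, 0, sizeof(p)); |
| * p.vlan_mac_obj = mac_obj; |
| * p.user_req.cmd = ECORE_VLAN_MAC_ADD; |
| * mm_memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN); |
| * ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags); |
| * |
| * rc = ecore_config_vlan_mac(pdev, &p); // waits for completion |
| */ |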
| |
| /** |
| * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec |
| * |
| * @pdev: device handle |
| * @o: vlan_mac object whose registry is scanned |
| * @vlan_mac_flags: flags selecting which elements to delete |
| * @ramrod_flags: execution flags to be used for this deletion |
| * |
| * Returns zero if the last operation has completed successfully and there |
| * are no more elements left, a positive value if the last operation has |
| * completed successfully and there are more previously configured elements, |
| * and a negative value if the current operation has failed. |
| */ |
| static int ecore_vlan_mac_del_all(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *o, |
| unsigned long *vlan_mac_flags, |
| unsigned long *ramrod_flags) |
| { |
| struct ecore_vlan_mac_registry_elem *pos = NULL; |
| struct ecore_vlan_mac_ramrod_params p; |
| struct ecore_exe_queue_obj *exeq = &o->exe_queue; |
| struct ecore_exeq_elem *exeq_pos, *exeq_pos_n; |
| unsigned long flags; |
| int read_lock; |
| int rc = 0; |
| |
| /* Clear pending commands first */ |
| |
| ECORE_SPIN_LOCK_BH(&exeq->lock); |
| |
| ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n, |
| &exeq->exe_queue, link, |
| struct ecore_exeq_elem) { |
| flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; |
| if (ECORE_VLAN_MAC_CMP_FLAGS(flags) == |
| ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { |
| rc = exeq->remove(pdev, exeq->owner, exeq_pos); |
| if (rc) { |
| ECORE_ERR("Failed to remove command\n"); |
| ECORE_SPIN_UNLOCK_BH(&exeq->lock); |
| return rc; |
| } |
| ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link, |
| &exeq->exe_queue); |
| ecore_exe_queue_free_elem(pdev, exeq_pos); |
| } |
| } |
| |
| ECORE_SPIN_UNLOCK_BH(&exeq->lock); |
| |
| /* Prepare a command request */ |
| mm_memset(&p, 0, sizeof(p)); |
| p.vlan_mac_obj = o; |
| p.ramrod_flags = *ramrod_flags; |
| p.user_req.cmd = ECORE_VLAN_MAC_DEL; |
| |
| /* Add all but the last VLAN-MAC to the execution queue without actually |
| * executing anything. |
| */ |
| ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags); |
| ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags); |
| ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags); |
| |
| ECORE_MSG(pdev, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n"); |
| read_lock = ecore_vlan_mac_h_read_lock(pdev, o); |
| if (read_lock != ECORE_SUCCESS) |
| return read_lock; |
| |
| ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, |
| struct ecore_vlan_mac_registry_elem) { |
| flags = pos->vlan_mac_flags; |
| if (ECORE_VLAN_MAC_CMP_FLAGS(flags) == |
| ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { |
| p.user_req.vlan_mac_flags = pos->vlan_mac_flags; |
| mm_memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); |
| rc = ecore_config_vlan_mac(pdev, &p); |
| if (rc < 0) { |
| ECORE_ERR("Failed to add a new DEL command\n"); |
| ecore_vlan_mac_h_read_unlock(pdev, o); |
| return rc; |
| } |
| } |
| } |
| |
| ECORE_MSG(pdev, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n"); |
| ecore_vlan_mac_h_read_unlock(pdev, o); |
| |
| p.ramrod_flags = *ramrod_flags; |
| ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags); |
| |
| return ecore_config_vlan_mac(pdev, &p); |
| } |
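| |
| /* Usage sketch (hypothetical; the flag name is illustrative): flushing |
| * every ETH MAC of an object and waiting for the queue to drain: |
| * |
| * unsigned long vlan_mac_flags = 0, ramrod_flags = 0; |
| * |
| * ECORE_SET_BIT_NA(ECORE_ETH_MAC, &vlan_mac_flags); |
| * ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &ramrod_flags); |
| * rc = mac_obj->delete_all(pdev, mac_obj, &vlan_mac_flags, |
| * &ramrod_flags); |
| */ |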
| |
| static INLINE void ecore_init_raw_obj(struct ecore_raw_obj *raw, u8 cl_id, |
| u32 cid, u8 func_id, void *rdata, lm_address_t rdata_mapping, int state, |
| unsigned long *pstate, ecore_obj_type type) |
| { |
| raw->func_id = func_id; |
| raw->cid = cid; |
| raw->cl_id = cl_id; |
| raw->rdata = rdata; |
| raw->rdata_mapping = rdata_mapping; |
| raw->state = state; |
| raw->pstate = pstate; |
| raw->obj_type = type; |
| raw->check_pending = ecore_raw_check_pending; |
| raw->clear_pending = ecore_raw_clear_pending; |
| raw->set_pending = ecore_raw_set_pending; |
| raw->wait_comp = ecore_raw_wait; |
| } |
| |
| static INLINE void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o, |
| u8 cl_id, u32 cid, u8 func_id, void *rdata, lm_address_t rdata_mapping, |
| int state, unsigned long *pstate, ecore_obj_type type, |
| struct ecore_credit_pool_obj *macs_pool, |
| struct ecore_credit_pool_obj *vlans_pool) |
| { |
| ECORE_LIST_INIT(&o->head); |
| o->head_reader = 0; |
| o->head_exe_request = FALSE; |
| o->saved_ramrod_flags = 0; |
| |
| o->macs_pool = macs_pool; |
| o->vlans_pool = vlans_pool; |
| |
| o->delete_all = ecore_vlan_mac_del_all; |
| o->restore = ecore_vlan_mac_restore; |
| o->complete = ecore_complete_vlan_mac; |
| o->wait = ecore_wait_vlan_mac; |
| |
| ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, |
| state, pstate, type); |
| } |
| |
| void ecore_init_mac_obj(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *mac_obj, |
| u8 cl_id, u32 cid, u8 func_id, void *rdata, |
| lm_address_t rdata_mapping, int state, |
| unsigned long *pstate, ecore_obj_type type, |
| struct ecore_credit_pool_obj *macs_pool) |
| { |
| union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj; |
| |
| ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, |
| rdata_mapping, state, pstate, type, |
| macs_pool, NULL); |
| |
| /* CAM credit pool handling */ |
| mac_obj->get_credit = ecore_get_credit_mac; |
| mac_obj->put_credit = ecore_put_credit_mac; |
| mac_obj->get_cam_offset = ecore_get_cam_offset_mac; |
| mac_obj->put_cam_offset = ecore_put_cam_offset_mac; |
| |
| if (CHIP_IS_E1x(pdev)) { |
| mac_obj->set_one_rule = ecore_set_one_mac_e1x; |
| mac_obj->check_del = ecore_check_mac_del; |
| mac_obj->check_add = ecore_check_mac_add; |
| mac_obj->check_move = ecore_check_move_always_err; |
| mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; |
| |
| /* Exe Queue */ |
| ecore_exe_queue_init(pdev, |
| &mac_obj->exe_queue, 1, qable_obj, |
| ecore_validate_vlan_mac, |
| ecore_remove_vlan_mac, |
| ecore_optimize_vlan_mac, |
| ecore_execute_vlan_mac, |
| ecore_exeq_get_mac); |
| } else { |
| mac_obj->set_one_rule = ecore_set_one_mac_e2; |
| mac_obj->check_del = ecore_check_mac_del; |
| mac_obj->check_add = ecore_check_mac_add; |
| mac_obj->check_move = ecore_check_move; |
| mac_obj->ramrod_cmd = |
| RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; |
| mac_obj->get_n_elements = ecore_get_n_elements; |
| |
| /* Exe Queue */ |
| ecore_exe_queue_init(pdev, |
| &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, |
| qable_obj, ecore_validate_vlan_mac, |
| ecore_remove_vlan_mac, |
| ecore_optimize_vlan_mac, |
| ecore_execute_vlan_mac, |
| ecore_exeq_get_mac); |
| } |
| } |
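| |
| /* Initialization sketch (hypothetical IDs and buffers; the state and |
| * type constants are illustrative): |
| * |
| * ecore_init_mac_obj(pdev, &mac_obj, cl_id, cid, func_id, |
| * rdata_virt, rdata_phys, |
| * ECORE_FILTER_MAC_PENDING, &sp_state, |
| * ECORE_OBJ_TYPE_RX_TX, &macs_pool); |
| * |
| * Note the exe_queue depth chosen above: one command per ramrod on |
| * E1x, up to CLASSIFY_RULES_COUNT batched commands on E2 and newer. |
| */ |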
| |
| void ecore_init_vlan_obj(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *vlan_obj, |
| u8 cl_id, u32 cid, u8 func_id, void *rdata, |
| lm_address_t rdata_mapping, int state, |
| unsigned long *pstate, ecore_obj_type type, |
| struct ecore_credit_pool_obj *vlans_pool) |
| { |
| union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj; |
| |
| ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, |
| rdata_mapping, state, pstate, type, NULL, |
| vlans_pool); |
| |
| vlan_obj->get_credit = ecore_get_credit_vlan; |
| vlan_obj->put_credit = ecore_put_credit_vlan; |
| vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan; |
| vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan; |
| |
| if (CHIP_IS_E1x(pdev)) { |
| ECORE_ERR("Do not support chips others than E2 and newer\n"); |
| BUG(); |
| } else { |
| vlan_obj->set_one_rule = ecore_set_one_vlan_e2; |
| vlan_obj->check_del = ecore_check_vlan_del; |
| vlan_obj->check_add = ecore_check_vlan_add; |
| vlan_obj->check_move = ecore_check_move; |
| vlan_obj->ramrod_cmd = |
| RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; |
| vlan_obj->get_n_elements = ecore_get_n_elements; |
| |
| /* Exe Queue */ |
| ecore_exe_queue_init(pdev, |
| &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, |
| qable_obj, ecore_validate_vlan_mac, |
| ecore_remove_vlan_mac, |
| ecore_optimize_vlan_mac, |
| ecore_execute_vlan_mac, |
| ecore_exeq_get_vlan); |
| } |
| } |
| |
| void ecore_init_vlan_mac_obj(struct _lm_device_t *pdev, |
| struct ecore_vlan_mac_obj *vlan_mac_obj, |
| u8 cl_id, u32 cid, u8 func_id, void *rdata, |
| lm_address_t rdata_mapping, int state, |
| unsigned long *pstate, ecore_obj_type type, |
| struct ecore_credit_pool_obj *macs_pool, |
| struct ecore_credit_pool_obj *vlans_pool) |
| { |
| union ecore_qable_obj *qable_obj = |
| (union ecore_qable_obj *)vlan_mac_obj; |
| |
| ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, |
| rdata_mapping, state, pstate, type, |
| macs_pool, vlans_pool); |
| |
| /* CAM pool handling */ |
| vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac; |
| vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac; |
| /* CAM offset is relevant for 57710 and 57711 chips only which have a |
| * single CAM for both MACs and VLAN-MAC pairs. So the offset |
| * will be taken from MACs' pool object only. |
| */ |
| vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac; |
| vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac; |
| |
| if (CHIP_IS_E1(pdev)) { |
| ECORE_ERR("Do not support chips others than E2\n"); |
| BUG(); |
| } else if (CHIP_IS_E1H(pdev)) { |
| vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e1h; |
| vlan_mac_obj->check_del = ecore_check_vlan_mac_del; |
| vlan_mac_obj->check_add = ecore_check_vlan_mac_add; |
| vlan_mac_obj->check_move = ecore_check_move_always_err; |
| vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; |
| |
| /* Exe Queue */ |
| ecore_exe_queue_init(pdev, |
| &vlan_mac_obj->exe_queue, 1, qable_obj, |
| ecore_validate_vlan_mac, |
| ecore_remove_vlan_mac, |
| ecore_optimize_vlan_mac, |
| ecore_execute_vlan_mac, |
| ecore_exeq_get_vlan_mac); |
| } else { |
| vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e2; |
| vlan_mac_obj->check_del = ecore_check_vlan_mac_del; |
| vlan_mac_obj->check_add = ecore_check_vlan_mac_add; |
| vlan_mac_obj->check_move = ecore_check_move; |
| vlan_mac_obj->ramrod_cmd = |
| RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; |
| |
| /* Exe Queue */ |
| ecore_exe_queue_init(pdev, |
| &vlan_mac_obj->exe_queue, |
| CLASSIFY_RULES_COUNT, |
| qable_obj, ecore_validate_vlan_mac, |
| ecore_remove_vlan_mac, |
| ecore_optimize_vlan_mac, |
| ecore_execute_vlan_mac, |
| ecore_exeq_get_vlan_mac); |
| } |
| } |
| |
| /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ |
| static INLINE void __storm_memset_mac_filters(struct _lm_device_t *pdev, |
| struct tstorm_eth_mac_filter_config *mac_filters, |
| u16 pf_id) |
| { |
| size_t size = sizeof(struct tstorm_eth_mac_filter_config); |
| |
| u32 addr = BAR_TSTRORM_INTMEM + |
| TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); |
| |
| __storm_memset_struct(pdev, addr, size, (u32 *)mac_filters); |
| } |
| |
| static int ecore_set_rx_mode_e1x(struct _lm_device_t *pdev, |
| struct ecore_rx_mode_ramrod_params *p) |
| { |
| /* update the pdev MAC filter structure */ |
| u32 mask = (1 << p->cl_id); |
| |
| struct tstorm_eth_mac_filter_config *mac_filters = |
| (struct tstorm_eth_mac_filter_config *)p->rdata; |
| |
| /* initial setting is drop-all */ |
| u8 drop_all_ucast = 1, drop_all_mcast = 1; |
| u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; |
| u8 unmatched_unicast = 0; |
| |
| /* On E1x only the Rx accept flags are taken into account, since Tx |
| * switching isn't enabled. */ |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags)) |
| /* accept matched ucast */ |
| drop_all_ucast = 0; |
| |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags)) |
| /* accept matched mcast */ |
| drop_all_mcast = 0; |
| |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { |
| /* accept all ucast */ |
| drop_all_ucast = 0; |
| accp_all_ucast = 1; |
| } |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { |
| /* accept all mcast */ |
| drop_all_mcast = 0; |
| accp_all_mcast = 1; |
| } |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags)) |
| /* accept (all) bcast */ |
| accp_all_bcast = 1; |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags)) |
| /* accept unmatched unicasts */ |
| unmatched_unicast = 1; |
| |
| mac_filters->ucast_drop_all = drop_all_ucast ? |
| mac_filters->ucast_drop_all | mask : |
| mac_filters->ucast_drop_all & ~mask; |
| |
| mac_filters->mcast_drop_all = drop_all_mcast ? |
| mac_filters->mcast_drop_all | mask : |
| mac_filters->mcast_drop_all & ~mask; |
| |
| mac_filters->ucast_accept_all = accp_all_ucast ? |
| mac_filters->ucast_accept_all | mask : |
| mac_filters->ucast_accept_all & ~mask; |
| |
| mac_filters->mcast_accept_all = accp_all_mcast ? |
| mac_filters->mcast_accept_all | mask : |
| mac_filters->mcast_accept_all & ~mask; |
| |
| mac_filters->bcast_accept_all = accp_all_bcast ? |
| mac_filters->bcast_accept_all | mask : |
| mac_filters->bcast_accept_all & ~mask; |
| |
| mac_filters->unmatched_unicast = unmatched_unicast ? |
| mac_filters->unmatched_unicast | mask : |
| mac_filters->unmatched_unicast & ~mask; |
| |
| ECORE_MSG(pdev, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" |
| "accp_mcast 0x%x\naccp_bcast 0x%x\n", |
| mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, |
| mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, |
| mac_filters->bcast_accept_all); |
| |
| /* write the MAC filter structure*/ |
| __storm_memset_mac_filters(pdev, mac_filters, p->func_id); |
| |
| /* The operation is completed */ |
| ECORE_CLEAR_BIT(p->state, p->pstate); |
| smp_mb__after_atomic(); |
| |
| return ECORE_SUCCESS; |
| } |
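| |
| /* Worked example of the per-client mask update above (values are |
| * hypothetical): for cl_id = 3, mask = (1 << 3) = 0x08. Requesting |
| * only ECORE_ACCEPT_UNICAST gives drop_all_ucast = 0 and |
| * drop_all_mcast = 1, hence: |
| * |
| * mac_filters->ucast_drop_all &= ~0x08; // stop dropping ucast |
| * mac_filters->mcast_drop_all |= 0x08; // keep dropping mcast |
| */ |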
| |
| /* Setup ramrod data */ |
| static INLINE void ecore_rx_mode_set_rdata_hdr_e2(u32 cid, |
| struct eth_classify_header *hdr, |
| u8 rule_cnt) |
| { |
| hdr->echo = mm_cpu_to_le32(cid); |
| hdr->rule_cnt = rule_cnt; |
| } |
| |
| static INLINE void ecore_rx_mode_set_cmd_state_e2(struct _lm_device_t *pdev, |
| unsigned long *accept_flags, |
| struct eth_filter_rules_cmd *cmd, |
| BOOL clear_accept_all) |
| { |
| u16 state; |
| |
| /* start with 'drop-all' */ |
| state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | |
| ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; |
| |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags)) |
| state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; |
| |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags)) |
| state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; |
| |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) { |
| state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; |
| state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; |
| } |
| |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) { |
| state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; |
| state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; |
| } |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags)) |
| state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; |
| |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) { |
| state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; |
| state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; |
| } |
| if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags)) |
| state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; |
| |
| /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ |
| if (clear_accept_all) { |
| state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; |
| state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; |
| state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; |
| state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; |
| } |
| |
| cmd->state = mm_cpu_to_le16(state); |
| } |
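| |
| /* Example of the resulting state word (a sketch): for a queue that |
| * accepts matched unicast plus all broadcast, the logic above yields |
| * |
| * state = ETH_FILTER_RULES_CMD_MCAST_DROP_ALL | |
| * ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; |
| * |
| * i.e. ECORE_ACCEPT_UNICAST clears UCAST_DROP_ALL and the drop-all |
| * default remains only for multicast. |
| */ |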
| |
| static int ecore_set_rx_mode_e2(struct _lm_device_t *pdev, |
| struct ecore_rx_mode_ramrod_params *p) |
| { |
| struct eth_filter_rules_ramrod_data *data = p->rdata; |
| int rc; |
| u8 rule_idx = 0; |
| |
| /* Reset the ramrod data buffer */ |
| mm_memset(data, 0, sizeof(*data)); |
| |
| /* Setup ramrod data */ |
| |
| /* Tx (internal switching) */ |
| if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { |
| data->rules[rule_idx].client_id = p->cl_id; |
| data->rules[rule_idx].func_id = p->func_id; |
| |
| data->rules[rule_idx].cmd_general_data = |
| ETH_FILTER_RULES_CMD_TX_CMD; |
| |
| ecore_rx_mode_set_cmd_state_e2(pdev, &p->tx_accept_flags, |
| &(data->rules[rule_idx++]), |
| FALSE); |
| } |
| |
| /* Rx */ |
| if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { |
| data->rules[rule_idx].client_id = p->cl_id; |
| data->rules[rule_idx].func_id = p->func_id; |
| |
| data->rules[rule_idx].cmd_general_data = |
| ETH_FILTER_RULES_CMD_RX_CMD; |
| |
| ecore_rx_mode_set_cmd_state_e2(pdev, &p->rx_accept_flags, |
| &(data->rules[rule_idx++]), |
| FALSE); |
| } |
| |
| /* If FCoE queue configuration has been requested, configure the Rx and |
| * internal switching modes for this queue in separate rules. |
| * |
| * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort: |
| * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. |
| */ |
| if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { |
| /* Tx (internal switching) */ |
| if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { |
| data->rules[rule_idx].client_id = FCOE_CID(pdev); |
| data->rules[rule_idx].func_id = p->func_id; |
| |
| data->rules[rule_idx].cmd_general_data = |
| ETH_FILTER_RULES_CMD_TX_CMD; |
| |
| ecore_rx_mode_set_cmd_state_e2(pdev, &p->tx_accept_flags, |
| &(data->rules[rule_idx]), |
| TRUE); |
| rule_idx++; |
| } |
| |
| /* Rx */ |