   NOTE! This copyright does *not* cover user programs that use kernel
 services by normal system calls - this is merely considered normal use
 of the kernel, and does *not* fall under the heading of "derived work".
 Also note that the GPL below is copyrighted by the Free Software
 Foundation, but the instance of code that it refers to (the Linux
 kernel) is copyrighted by me and others who actually wrote it.

 Also note that the only valid version of the GPL as far as the kernel
 is concerned is _this_ particular version of the license (ie v2, not
 v2.2 or v3.x or whatever), unless explicitly otherwise stated.

			Linus Torvalds

----------------------------------------

		    GNU GENERAL PUBLIC LICENSE
		       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

			    Preamble

  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users.  This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it.  (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.)  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

  To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have.  You must make sure that they, too, receive or can get the
source code.  And you must show them these terms so they know their
rights.

  We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

  Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software.  If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

  Finally, any free program is threatened constantly by software
patents.  We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary.  To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.

  The precise terms and conditions for copying, distribution and
modification follow.

		    GNU GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

  0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License.  The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language.  (Hereinafter, translation is included without limitation in
the term "modification".)  Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope.  The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.

  1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.

You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.

  2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

    a) You must cause the modified files to carry prominent notices
    stating that you changed the files and the date of any change.

    b) You must cause any work that you distribute or publish, that in
    whole or in part contains or is derived from the Program or any
    part thereof, to be licensed as a whole at no charge to all third
    parties under the terms of this License.

    c) If the modified program normally reads commands interactively
    when run, you must cause it, when started running for such
    interactive use in the most ordinary way, to print or display an
    announcement including an appropriate copyright notice and a
    notice that there is no warranty (or else, saying that you provide
    a warranty) and that users may redistribute the program under
    these conditions, and telling the user how to view a copy of this
    License.  (Exception: if the Program itself is interactive but
    does not normally print such an announcement, your work based on
    the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole.  If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works.  But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.

In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

  3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:

    a) Accompany it with the complete corresponding machine-readable
    source code, which must be distributed under the terms of Sections
    1 and 2 above on a medium customarily used for software interchange; or,

    b) Accompany it with a written offer, valid for at least three
    years, to give any third party, for a charge no more than your
    cost of physically performing source distribution, a complete
    machine-readable copy of the corresponding source code, to be
    distributed under the terms of Sections 1 and 2 above on a medium
    customarily used for software interchange; or,

    c) Accompany it with the information you received as to the offer
    to distribute corresponding source code.  (This alternative is
    allowed only for noncommercial distribution and only if you
    received the program in object code or executable form with such
    an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for
making modifications to it.  For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable.  However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.

If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.

  4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License.  Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.

  5. You are not required to accept this License, since you have not
signed it.  However, nothing else grants you permission to modify or
distribute the Program or its derivative works.  These actions are
prohibited by law if you do not accept this License.  Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.

  6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions.  You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.

  7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all.  For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices.  Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.

  8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded.  In such case, this License incorporates
the limitation as if written in the body of this License.

  9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number.  If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation.  If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.

  10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission.  For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this.  Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.

			    NO WARRANTY

  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.

  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.

		     END OF TERMS AND CONDITIONS

	    How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA


Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:

    Gnomovision version 69, Copyright (C) year name of author
    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
  `Gnomovision' (which makes passes at compilers) written by James Hacker.

  <signature of Ty Coon>, 1 April 1989
  Ty Coon, President of Vice

This General Public License does not permit incorporating your program into
proprietary programs.  If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library.  If this is what you want to do, use the GNU Library General
Public License instead of this License.
/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"

/*
 * Generic information about the driver.
 */
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"

/*
 * Module Parameters.
 * ==================
 */

/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
		 "default adapter ethtool message level bitmap, "
		 "deprecated parameter");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI.  This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX	2
#define MSI_MSI		1
#define MSI_DEFAULT	MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
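/*
 * Usage sketch (hypothetical invocation, not part of this file): the
 * parameter can be given at load time, e.g. "modprobe cxgb4vf msi=1" to
 * restrict the driver to plain MSI on platforms where MSI-X vectors are
 * scarce; with the default msi=2 the driver tries MSI-X first and falls
 * back to MSI.
 */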

/*
 * Fundamental constants.
 * ======================
 */

enum {
	MAX_TXQ_ENTRIES		= 16384,
	MAX_RSPQ_ENTRIES	= 16384,
	MAX_RX_BUFFERS		= 16384,

	MIN_TXQ_ENTRIES		= 32,
	MIN_RSPQ_ENTRIES	= 128,
	MIN_FL_ENTRIES		= 16,

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queues
	 * indices are all in units of Egress Context Units bytes, and free
	 * list entries are 64-bit PCI DMA addresses.  And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused.  See sge.c for more details ...
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};
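/*
 * A worked example of the Free List arithmetic above (a sketch, assuming
 * the usual SGE_EQ_IDXSIZE of 64 bytes): one Egress Context Unit then holds
 * 64 / sizeof(__be64) = 8 Free List pointers, so FL_PER_EQ_UNIT = 8 and we
 * always leave MIN_FL_RESID = 8 entries unused so that the Producer Index
 * never catches up with the Consumer Index.
 */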

/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;

/*
 * OS "Callback" functions.
 * ========================
 */

/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
	struct net_device *dev = adapter->port[pidx];

	/*
	 * If the port is disabled or the current recorded "link up"
	 * status matches the new status, just return.
	 */
	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
		return;

	/*
	 * Tell the OS that the link status has changed and print a short
	 * informative message on the console about the event.
	 */
	if (link_ok) {
		const char *s;
		const char *fc;
		const struct port_info *pi = netdev_priv(dev);

		netif_carrier_on(dev);

		switch (pi->link_cfg.speed) {
		case 40000:
			s = "40Gbps";
			break;

		case 10000:
			s = "10Gbps";
			break;

		case 1000:
			s = "1000Mbps";
			break;

		case 100:
			s = "100Mbps";
			break;

		default:
			s = "unknown";
			break;
		}

		switch (pi->link_cfg.fc) {
		case PAUSE_RX:
			fc = "RX";
			break;

		case PAUSE_TX:
			fc = "TX";
			break;

		case PAUSE_RX|PAUSE_TX:
			fc = "RX/TX";
			break;

		default:
			fc = "no";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
	} else {
		netif_carrier_off(dev);
		netdev_info(dev, "link down\n");
	}
}

/*
 * The port module type has changed on the indicated "port" (Virtual
 * Interface).
 */
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};
	const struct net_device *dev = adapter->port[pidx];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
			 dev->name);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
			 dev->name, mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
			 "module inserted\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
			 "forcing TWINAX\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
			 dev->name);
	else
		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
			 "inserted\n", dev->name, pi->mod_type);
}

/*
 * Net device operations.
 * ======================
 */

/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters or promiscuity here; the stack does
	 * that step explicitly.  Enable VLAN acceleration.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
			      true);
	if (ret == 0) {
		ret = t4vf_change_mac(pi->adapter, pi->viid,
				      pi->xact_addr_filt, dev->dev_addr, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
	return ret;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
	int pidx;

	/*
	 * Firmware events.
	 */
	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
		 "%s-FWeventq", adapter->name);
	adapter->msix_info[MSIX_FW].desc[namelen] = 0;

	/*
	 * Ethernet queues.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
		}
	}
}

/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}

/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}

/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts.
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC_V(0) |
		     SEINTARM_V(rspq->intr_params) |
		     INGRESSQID_V(rspq->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.
	 */
	if (adapter->flags & USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC_V(0) |
			     SEINTARM_V(s->intrq.intr_params) |
			     INGRESSQID_V(s->intrq.cntxt_id));

}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}

/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;
		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_FW4_MSG: {
		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
		 */
		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(adapter->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			break;
		}
		cpl = (void *)p;
		/*FALLTHROUGH*/
	}

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message.  We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = cpl;
		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID.  None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}

/*
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initialize them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector.  The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets.  These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists.  This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}

/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues.  We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface).  We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed.  We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;
				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}

/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & USING_MSIX)
			name_msix_vecs(adapter);

		adapter->flags |= FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);

	return 0;
}

/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) is closed.  (Note that this routine is called "cxgb_down" in the
 * PF Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}

/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/*
	 * Note that this interface is up and start everything up ...
	 */
	err = link_start(dev);
	if (err)
		goto err_unwind;

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}

/*
 * Shut down a net device.  This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	t4vf_enable_vi(adapter, pi->viid, false, false);
	pi->link_cfg.link_ok = 0;

	clear_bit(pi->port_id, &adapter->open_device_map);
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return 0;
}

/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}

static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adapter->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}

static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
				  NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
		ret = cxgb4vf_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4vf_set_addr_hash(pi);
		}
	}

	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);

	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
			       (dev->flags & IFF_PROMISC) != 0,
			       (dev->flags & IFF_ALLMULTI) != 0,
			       1, -1, sleep_ok);
}

/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
	int i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		int delta = us - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			timer_idx = i;
		}
	}
	return timer_idx;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			pktcnt_idx = i;
		}
	}
	return pktcnt_idx;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
			       const struct sge_rspq *rspq)
{
	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);

	return timer_idx < SGE_NTIMERS
		? adapter->sge.timer_val[timer_idx]
		: 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adapter: the adapter
 *	@rspq: the RX response queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an RX response queue's interrupt hold-off time and packet count.
 *	At least one of the two needs to be enabled for the queue to generate
 *	interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that.  If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     QINTR_CNT_EN_V(cnt > 0));
	return 0;
}
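/*
 * Example (a sketch): set_rxq_intr_params(adapter, rspq, 5, 8) would select
 * the configured holdoff timer closest to 5us and the packet-count threshold
 * closest to 8, so the queue interrupts when either limit is reached first.
 */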

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version 4, revision 0x3f (cxgb4vf).
	 */
	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
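/*
 * Illustration (a sketch, assuming a T4 part where CHELSIO_CHIP_VERSION()
 * evaluates to 4): mk_adap_vers() returns 4 | (0x3f << 10) = 0xfc04, i.e.
 * chip version 4 in bits 0..9 and the fixed 0x3f VF "revision" in bits
 * 10..15.
 */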

/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret = 0;

	switch (cmd) {
	    /*
	     * The VF Driver doesn't have access to any of the other
	     * common Ethernet device ioctl()'s (like reading/writing
	     * PHY registers, etc.).
	     */

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/* accommodate SACK */
	if (new_mtu < 81)
		return -EINVAL;

	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
			      -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb4vf_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
				features & NETIF_F_HW_VLAN_CTAG_TX, 0);

	return 0;
}

/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
	int ret;
	struct sockaddr *addr = _addr;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
			      addr->sa_data, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues.  This is called outside of normal interrupt
 * context.
 */
static void cxgb4vf_poll_controller(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->flags & USING_MSIX) {
		struct sge_eth_rxq *rxq;
		int nqsets;

		rxq = &adapter->sge.ethrxq[pi->first_qset];
		for (nqsets = pi->nqsets; nqsets; nqsets--) {
			t4vf_sge_intr_msix(0, &rxq->rspq);
			rxq++;
		}
	} else
		t4vf_intr_handler(adapter)(0, adapter);
}
#endif

/*
 * Ethtool operations.
 * ===================
 *
 * Note that we don't support any ethtool operations which change the physical
 * state of the port to which we're linked.
 */
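
/*
 * These handlers back the standard ethtool command-line interface, e.g.
 * (the interface name is illustrative):
 *
 *	ethtool -i ethX		# get_drvinfo
 *	ethtool -S ethX		# get_sset_count/get_strings/get_ethtool_stats
 *	ethtool -g/-G ethX	# get_ringparam/set_ringparam
 *	ethtool -c/-C ethX	# get_coalesce/set_coalesce
 */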

/**
 *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
 *	@port_type: Firmware Port Type
 *	@mod_type: Firmware Module Type
 *
 *	Translate Firmware Port/Module type to Ethtool Port Type.
 */
static int from_fw_port_mod_type(enum fw_port_type port_type,
				 enum fw_port_module_type mod_type)
{
	if (port_type == FW_PORT_TYPE_BT_SGMII ||
	    port_type == FW_PORT_TYPE_BT_XFI ||
	    port_type == FW_PORT_TYPE_BT_XAUI) {
		return PORT_TP;
	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
		return PORT_FIBRE;
	} else if (port_type == FW_PORT_TYPE_SFP ||
		   port_type == FW_PORT_TYPE_QSFP_10G ||
		   port_type == FW_PORT_TYPE_QSA ||
		   port_type == FW_PORT_TYPE_QSFP) {
		if (mod_type == FW_PORT_MOD_TYPE_LR ||
		    mod_type == FW_PORT_MOD_TYPE_SR ||
		    mod_type == FW_PORT_MOD_TYPE_ER ||
		    mod_type == FW_PORT_MOD_TYPE_LRM)
			return PORT_FIBRE;
		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			return PORT_DA;
		else
			return PORT_OTHER;
	}

	return PORT_OTHER;
}

/**
 *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
 *	@port_type: Firmware Port Type
 *	@fw_caps: Firmware Port Capabilities
 *	@link_mode_mask: ethtool Link Mode Mask
 *
 *	Translate a Firmware Port Capabilities specification to an ethtool
 *	Link Mode Mask.
 */
static void fw_caps_to_lmm(enum fw_port_type port_type,
			   unsigned int fw_caps,
			   unsigned long *link_mode_mask)
{
	#define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\
			 ## _BIT, link_mode_mask)

	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
		do { \
			if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
				SET_LMM(__lmm_name); \
		} while (0)

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		SET_LMM(TP);
		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_KR:
		SET_LMM(Backplane);
		SET_LMM(10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP_AP:
		SET_LMM(Backplane);
		SET_LMM(10000baseR_FEC);
		SET_LMM(10000baseKR_Full);
		SET_LMM(1000baseKX_Full);
		break;

	case FW_PORT_TYPE_BP4_AP:
		SET_LMM(Backplane);
		SET_LMM(10000baseR_FEC);
		SET_LMM(10000baseKR_Full);
		SET_LMM(1000baseKX_Full);
		SET_LMM(10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(40000baseSR4_Full);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
		SET_LMM(FIBRE);
		SET_LMM(25000baseCR_Full);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(100000baseCR4_Full);
		break;

	default:
		break;
	}

	FW_CAPS_TO_LMM(ANEG, Autoneg);
	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);

	#undef FW_CAPS_TO_LMM
	#undef SET_LMM
}

static int cxgb4vf_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings
							*link_ksettings)
{
	const struct port_info *pi = netdev_priv(dev);
	struct ethtool_link_settings *base = &link_ksettings->base;

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);

	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);

	if (pi->mdio_addr >= 0) {
		base->phy_address = pi->mdio_addr;
		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
				      ? ETH_MDIO_SUPPORTS_C22
				      : ETH_MDIO_SUPPORTS_C45);
	} else {
		base->phy_address = 255;
		base->mdio_support = 0;
	}

	fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported,
		       link_ksettings->link_modes.supported);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising,
		       link_ksettings->link_modes.advertising);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising,
		       link_ksettings->link_modes.lp_advertising);

	if (netif_carrier_ok(dev)) {
		base->speed = pi->link_cfg.speed;
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = pi->link_cfg.autoneg;
	if (pi->link_cfg.supported & FW_PORT_CAP_ANEG)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
	if (pi->link_cfg.autoneg)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	return 0;
}

/*
 * Return our driver information.
 */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
		sizeof(drvinfo->bus_info));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}

/*
 * Return current adapter message level.
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}

/*
 * Return the device's current Queue Set ring size parameters along with the
 * allowed maximum values.  Since ethtool doesn't understand the concept of
 * multi-queue devices, we just return the current values associated with the
 * first Queue Set.
 */
static void cxgb4vf_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	rp->rx_max_pending = MAX_RX_BUFFERS;
	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	rp->rx_jumbo_max_pending = 0;
	rp->tx_max_pending = MAX_TXQ_ENTRIES;

	/* Report the Free List size net of MIN_FL_RESID so that get/set are
	 * symmetric; cxgb4vf_set_ringparam() adds MIN_FL_RESID back.
	 */
	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	rp->rx_jumbo_pending = 0;
	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

/*
 * Set the Queue Set ring size parameters for the device.  Again, since
 * ethtool doesn't allow for the concept of multiple queues per device, we'll
 * apply these new values across all of the Queue Sets associated with the
 * device -- after vetting them of course!
 */
static int cxgb4vf_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	int qs;

	if (rp->rx_pending > MAX_RX_BUFFERS ||
	    rp->rx_jumbo_pending ||
	    rp->tx_pending > MAX_TXQ_ENTRIES ||
	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    rp->rx_pending < MIN_FL_ENTRIES ||
	    rp->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
		s->ethtxq[qs].q.size = rp->tx_pending;
	}
	return 0;
}

/*
 * Return the interrupt holdoff timer and count for the first Queue Set on the
 * device.  Our extension ioctl() (the cxgbtool interface) allows the
 * interrupt holdoff timer to be read on all of the device's Queue Sets.
 */
static int cxgb4vf_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adapter = pi->adapter;
	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;

	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
	coalesce->rx_max_coalesced_frames =
		((rspq->intr_params & QINTR_CNT_EN_F)
		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
		 : 0);
	return 0;
}

/*
 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
 * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
 * the interrupt holdoff timer on any of the device's Queue Sets.
 */
static int cxgb4vf_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return set_rxq_intr_params(adapter,
				   &adapter->sge.ethrxq[pi->first_qset].rspq,
				   coalesce->rx_coalesce_usecs,
				   coalesce->rx_max_coalesced_frames);
}

/*
 * Report current port link pause parameter settings.
 */
static void cxgb4vf_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pauseparam)
{
	struct port_info *pi = netdev_priv(dev);

	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
}

/*
 * Identify the port by blinking the port's LED.
 */
static int cxgb4vf_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct port_info *pi = netdev_priv(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4vf_identify_port(pi->adapter, pi->viid, val);
}

/*
 * Port stats maintained per queue of the port.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 lro_pkts;
	u64 lro_merged;
};

/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes  ",
	"TxBroadcastFrames ",
	"TxMulticastBytes  ",
	"TxMulticastFrames ",
	"TxUnicastBytes    ",
	"TxUnicastFrames   ",
	"TxDroppedFrames   ",
	"TxOffloadBytes    ",
	"TxOffloadFrames   ",
	"RxBroadcastBytes  ",
	"RxBroadcastFrames ",
	"RxMulticastBytes  ",
	"RxMulticastFrames ",
	"RxUnicastBytes    ",
	"RxUnicastFrames   ",
	"RxErrorFrames     ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO               ",
	"TxCsumOffload     ",
	"RxCsumGood        ",
	"VLANextractions   ",
	"VLANinsertions    ",
	"GROPackets        ",
	"GROMerged         ",
};

/*
 * Return the number of statistics in the specified statistics set.
 */
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
	/*NOTREACHED*/
}

/*
 * Return the strings for the specified statistics set.
 */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, stats_strings, sizeof(stats_strings));
		break;
	}
}

/*
 * Small utility routine to accumulate queue statistics across the queues of
 * a "port".
 */
static void collect_sge_port_stats(const struct adapter *adapter,
				   const struct port_info *pi,
				   struct queue_port_stats *stats)
{
	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
	int qs;

	memset(stats, 0, sizeof(*stats));
	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
		stats->tso += txq->tso;
		stats->tx_csum += txq->tx_cso;
		stats->rx_csum += rxq->stats.rx_cso;
		stats->vlan_ex += rxq->stats.vlan_ex;
		stats->vlan_ins += txq->vlan_ins;
		stats->lro_pkts += rxq->stats.lro_pkts;
		stats->lro_merged += rxq->stats.lro_merged;
	}
}

/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	int err = t4vf_get_port_stats(adapter, pi->pidx,
				      (struct t4vf_port_stats *)data);
	if (err)
		memset(data, 0, sizeof(struct t4vf_port_stats));

	/* The hardware port statistics fill the first part of the buffer;
	 * append the accumulated per-queue statistics after them.
	 */
	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}

/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
			   unsigned int start, unsigned int end)
{
	u32 *bp = regbuf + start - T4VF_REGMAP_START;

	for ( ; start <= end; start += sizeof(u32)) {
		/*
		 * Avoid reading the Mailbox Control register since that
		 * can trigger a Mailbox Ownership Arbitration cycle and
		 * interfere with communication with the firmware.
		 */
		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
			*bp++ = 0xffff;
		else
			*bp++ = t4_read_reg(adapter, start);
	}
}

/*
 * Copy our entire register map into the provided buffer.
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);

	/* T5 adds new registers in the PL Register map.
	 */
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}

/*
 * Report current Wake On LAN settings.
 */
static void cxgb4vf_get_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/*
 * TCP Segmentation Offload flags which we support.
 */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static const struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
	.get_drvinfo		= cxgb4vf_get_drvinfo,
	.get_msglevel		= cxgb4vf_get_msglevel,
	.set_msglevel		= cxgb4vf_set_msglevel,
	.get_ringparam		= cxgb4vf_get_ringparam,
	.set_ringparam		= cxgb4vf_set_ringparam,
	.get_coalesce		= cxgb4vf_get_coalesce,
	.set_coalesce		= cxgb4vf_set_coalesce,
	.get_pauseparam		= cxgb4vf_get_pauseparam,
	.get_link		= ethtool_op_get_link,
	.get_strings		= cxgb4vf_get_strings,
	.set_phys_id		= cxgb4vf_phys_id,
	.get_sset_count		= cxgb4vf_get_sset_count,
	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
	.get_regs_len		= cxgb4vf_get_regs_len,
	.get_regs		= cxgb4vf_get_regs,
	.get_wol		= cxgb4vf_get_wol,
};

/*
 * /sys/kernel/debug/cxgb4vf support code and data.
 * ================================================
 */

/*
 * Show Firmware Mailbox Command/Reply Log
 *
 * Note that we don't do any locking when dumping the Firmware Mailbox Log so
 * it's possible that we can catch things during a log update and therefore
 * see partially corrupted log entries.  But it's probably Good Enough(tm).
 * If we ever decide that we want to make sure that we're dumping a coherent
 * log, we'd need to perform locking in the mailbox logging and in
 * mboxlog_open() where we'd need to grab the entire mailbox log in one go
 * like we do for the Firmware Device Log.  But as stated above, meh ...
 */
static int mboxlog_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int entry_idx, i;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%10s  %15s  %5s  %5s  %s\n",
			   "Seq#", "Tstamp", "Atime", "Etime",
			   "Command/Reply");
		return 0;
	}

	/* Entry positions are encoded as (pos + 1) by mboxlog_get_idx()
	 * (position 0 is SEQ_START_TOKEN), so the first log entry arrives
	 * here as v == 2.  Entries are read starting at log->cursor and
	 * wrap around the end of the circular log.
	 */
	entry_idx = log->cursor + ((uintptr_t)v - 2);
	if (entry_idx >= log->size)
		entry_idx -= log->size;
	entry = mbox_cmd_log_entry(log, entry_idx);

	/* skip over unused entries */
	if (entry->timestamp == 0)
		return 0;

	seq_printf(seq, "%10u  %15llu  %5d  %5d",
		   entry->seqno, entry->timestamp,
		   entry->access, entry->execute);
	for (i = 0; i < MBOX_LEN / 8; i++) {
		u64 flit = entry->cmd[i];
		u32 hi = (u32)(flit >> 32);
		u32 lo = (u32)flit;

		seq_printf(seq, "  %08x %08x", hi, lo);
	}
	seq_puts(seq, "\n");
	return 0;
}

static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
{
	struct adapter *adapter = seq->private;
	struct mbox_cmd_log *log = adapter->mbox_log;

	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
}

static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
}

static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return mboxlog_get_idx(seq, *pos);
}

static void mboxlog_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations mboxlog_seq_ops = {
	.start = mboxlog_start,
	.next  = mboxlog_next,
	.stop  = mboxlog_stop,
	.show  = mboxlog_show
};

static int mboxlog_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &mboxlog_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;

		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations mboxlog_fops = {
	.owner   = THIS_MODULE,
	.open    = mboxlog_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/*
 * Show SGE Queue Set information.  We display QPL Queue Sets per line.
 */
#define QPL	4

static int sge_qinfo_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

	#define S3(fmt_spec, s, v) \
		do {\
			seq_printf(seq, "%-12s", s); \
			for (qs = 0; qs < n; ++qs) \
				seq_printf(seq, " %16" fmt_spec, v); \
			seq_putc(seq, '\n'); \
		} while (0)
	#define S(s, v)		S3("s", s, v)
	#define T(s, v)		S3("u", s, txq[qs].v)
	#define R(s, v)		S3("u", s, rxq[qs].v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		S3("d", "Port:",
		   (rxq[qs].rspq.netdev
		    ? ((struct port_info *)
		       netdev_priv(rxq[qs].rspq.netdev))->port_id
		    : -1));
		T("TxQ ID:", q.abs_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ PIdx:", q.pidx);
		T("TxQ CIdx:", q.cidx);
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
		S3("u", "Intr pktcnt:",
		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
		R("RspQ CIdx:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		R("FL ID:", fl.abs_id);
		R("FL size:", fl.size - MIN_FL_RESID);
		R("FL avail:", fl.avail);
		R("FL PIdx:", fl.pidx);
		R("FL CIdx:", fl.cidx);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, evtq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[evtq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, intrq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[intrq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
	}

	#undef R
	#undef T
	#undef S
	#undef S3

	return 0;
}

/*
 * Return the number of "entries" in our "file".  We group the multi-Queue
 * sections with QPL Queue Sets per "entry".  The sections of the output are:
 *
 *     Ethernet RX/TX Queue Sets
 *     Firmware Event Queue
 *     Forwarded Interrupt Queue (if in MSI mode)
 */
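/*
 * For example (hypothetical numbers), 10 Ethernet Queue Sets with QPL == 4
 * yield DIV_ROUND_UP(10, 4) == 3 Ethernet "entries", plus 1 for the Firmware
 * Event Queue, plus 1 more if we're using MSI Interrupts.
 */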
static int sge_queue_entries(const struct adapter *adapter)
{
	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
		((adapter->flags & USING_MSI) != 0);
}

static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
{
	int entries = sge_queue_entries(seq->private);

	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static void sge_queue_stop(struct seq_file *seq, void *v)
{
}

static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int entries = sge_queue_entries(seq->private);

	++*pos;
	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static const struct seq_operations sge_qinfo_seq_ops = {
	.start = sge_queue_start,
	.next  = sge_queue_next,
	.stop  = sge_queue_stop,
	.show  = sge_qinfo_show
};

static int sge_qinfo_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &sge_qinfo_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations sge_qinfo_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = sge_qinfo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/*
 * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
 */
#define QPL	4

static int sge_qstats_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

	#define S3(fmt, s, v) \
		do { \
			seq_printf(seq, "%-16s", s); \
			for (qs = 0; qs < n; ++qs) \
				seq_printf(seq, " %8" fmt, v); \
			seq_putc(seq, '\n'); \
		} while (0)
	#define S(s, v)		S3("s", s, v)

	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
	#define T(s, v)		T3("lu", s, v)

	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
	#define R(s, v)		R3("lu", s, v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
		R("RxPackets:", stats.pkts);
		R("RxCSO:", stats.rx_cso);
		R("VLANxtract:", stats.vlan_ex);
		R("LROmerged:", stats.lro_merged);
		R("LROpackets:", stats.lro_pkts);
		R("RxDrops:", stats.rx_drops);
		T("TSO:", tso);
		T("TxCSO:", tx_cso);
		T("VLANins:", vlan_ins);
		T("TxQFull:", q.stops);
		T("TxQRestarts:", q.restarts);
		T("TxMapErr:", mapping_err);
		R("FLAllocErr:", fl.alloc_failed);
		R("FLLrgAlcErr:", fl.large_alloc_failed);
		R("FLStarving:", fl.starving);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   evtq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   intrq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
	}

	#undef R
	#undef T
	#undef S
	#undef R3
	#undef T3
	#undef S3

	return 0;
}

/*
 * Return the number of "entries" in our "file".  We group the multi-Queue
 * sections with QPL Queue Sets per "entry".  The sections of the output are:
 *
 *     Ethernet RX/TX Queue Sets
 *     Firmware Event Queue
 *     Forwarded Interrupt Queue (if in MSI mode)
 */
static int sge_qstats_entries(const struct adapter *adapter)
{
	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
		((adapter->flags & USING_MSI) != 0);
}

static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
{
	int entries = sge_qstats_entries(seq->private);

	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static void sge_qstats_stop(struct seq_file *seq, void *v)
{
}

static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int entries = sge_qstats_entries(seq->private);

	(*pos)++;
	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static const struct seq_operations sge_qstats_seq_ops = {
	.start = sge_qstats_start,
	.next  = sge_qstats_next,
	.stop  = sge_qstats_stop,
	.show  = sge_qstats_show
};

static int sge_qstats_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &sge_qstats_seq_ops);

	if (res == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations sge_qstats_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = sge_qstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/*
 * Show PCI-E SR-IOV Virtual Function Resource Limits.
 */
static int resources_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct vf_resources *vfres = &adapter->params.vfres;

	#define S(desc, fmt, var) \
		seq_printf(seq, "%-60s " fmt "\n", \
			   desc " (" #var "):", vfres->var)

	S("Virtual Interfaces", "%d", nvi);
	S("Egress Queues", "%d", neq);
	S("Ethernet Control", "%d", nethctrl);
	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
	S("Ingress Queues", "%d", niq);
	S("Traffic Class", "%d", tc);
	S("Port Access Rights Mask", "%#x", pmask);
	S("MAC Address Filters", "%d", nexactf);
	S("Firmware Command Read Capabilities", "%#x", r_caps);
	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);

	#undef S

	return 0;
}

static int resources_open(struct inode *inode, struct file *file)
{
	return single_open(file, resources_show, inode->i_private);
}

static const struct file_operations resources_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = resources_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/*
 * Show Virtual Interfaces.
 */
static int interfaces_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Interface  Port   VIID\n");
	} else {
		struct adapter *adapter = seq->private;
		int pidx = (uintptr_t)v - 2;
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);

		seq_printf(seq, "%9s  %4d  %#5x\n",
			   dev->name, pi->port_id, pi->viid);
	}
	return 0;
}

static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
{
	return pos <= adapter->params.nports
		? (void *)(uintptr_t)(pos + 1)
		: NULL;
}

static void *interfaces_start(struct seq_file *seq, loff_t *pos)
{
	return *pos
		? interfaces_get_idx(seq->private, *pos)
		: SEQ_START_TOKEN;
}

static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return interfaces_get_idx(seq->private, *pos);
}

static void interfaces_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations interfaces_seq_ops = {
	.start = interfaces_start,
	.next  = interfaces_next,
	.stop  = interfaces_stop,
	.show  = interfaces_show
};

static int interfaces_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &interfaces_seq_ops);

	if (res == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations interfaces_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = interfaces_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/*
 * /sys/kernel/debug/cxgb4vf/ files list.
 */
struct cxgb4vf_debugfs_entry {
	const char *name;		/* name of debugfs node */
	umode_t mode;			/* file system mode */
	const struct file_operations *fops;
};

static struct cxgb4vf_debugfs_entry debugfs_files[] = {
	{ "mboxlog",    S_IRUGO, &mboxlog_fops },
	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
	{ "resources",  S_IRUGO, &resources_proc_fops },
	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
};
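
/*
 * With debugfs mounted, these files appear under a per-device directory
 * named after the PCI function, e.g. (the device address is illustrative):
 *
 *	/sys/kernel/debug/cxgb4vf/0000:04:00.1/resources
 */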

/*
 * Module and device initialization and cleanup code.
 * ==================================================
 */

/*
 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
 * directory (debugfs_root) has already been set up.
 */
static int setup_debugfs(struct adapter *adapter)
{
	int i;

	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Debugfs support is best effort.
	 */
	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
		(void)debugfs_create_file(debugfs_files[i].name,
				  debugfs_files[i].mode,
				  adapter->debugfs_root,
				  (void *)adapter,
				  debugfs_files[i].fops);

	return 0;
}

/*
 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
 * it to our caller to tear down the directory (debugfs_root).
 */
static void cleanup_debugfs(struct adapter *adapter)
{
	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Unlike our sister routine cleanup_proc(), we don't need to remove
	 * individual entries because a call will be made to
	 * debugfs_remove_recursive().  We just need to clean up any ancillary
	 * persistent state.
	 */
	/* nothing to do */
}

/* Figure out how many Ports and Queue Sets we can support.  This depends on
 * knowing our Virtual Function Resources and may be called a second time if
 * we fall back from MSI-X to MSI Interrupt Mode.
 */
static void size_nports_qsets(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	unsigned int ethqsets, pmask_nports;

	/* The number of "ports" which we support is equal to the number of
	 * Virtual Interfaces with which we've been provisioned.
	 */
	adapter->params.nports = vfres->nvi;
	if (adapter->params.nports > MAX_NPORTS) {
		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
			 " allowed virtual interfaces\n", MAX_NPORTS,
			 adapter->params.nports);
		adapter->params.nports = MAX_NPORTS;
	}

	/* We may have been provisioned with more VIs than the number of
	 * ports we're allowed to access (our Port Access Rights Mask).
	 * This is obviously a configuration conflict but we don't want to
	 * crash the kernel or anything silly just because of that.
	 */
	pmask_nports = hweight32(adapter->params.vfres.pmask);
	if (pmask_nports < adapter->params.nports) {
		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
			 " virtual interfaces; limited by Port Access Rights"
			 " mask %#x\n", pmask_nports, adapter->params.nports,
			 adapter->params.vfres.pmask);
		adapter->params.nports = pmask_nports;
	}

	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
	 * reserve an Ingress Queue for Forwarded Interrupts.
	 *
	 * The rest of the FL/Intr-capable ingress queues will be matched up
	 * one-for-one with Ethernet/Control egress queues in order to form
	 * "Queue Sets" which will be aportioned between the "ports".  For
	 * each Queue Set, we'll need the ability to allocate two Egress
	 * Contexts -- one for the Ingress Queue Free List and one for the TX
	 * Ethernet Queue.
	 *
	 * Note that even if we're currently configured to use MSI-X
	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
	 * happens we'll need to adjust things later.
	 */
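	/*
	 * A worked example with hypothetical resource numbers: niqflint = 18,
	 * nethctrl = 16 and neq = 34 in MSI-X mode gives 18 - 1 = 17
	 * candidate Queue Sets, reduced to min(16, 17) = 16 by nethctrl,
	 * left at 16 since neq/2 = 17 >= 16, and finally capped by
	 * MAX_ETH_QSETS.
	 */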
	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
	if (vfres->nethctrl != ethqsets)
		ethqsets = min(vfres->nethctrl, ethqsets);
	if (vfres->neq < ethqsets*2)
		ethqsets = vfres->neq/2;
	if (ethqsets > MAX_ETH_QSETS)
		ethqsets = MAX_ETH_QSETS;
	adapter->sge.max_ethqsets = ethqsets;

	if (adapter->sge.max_ethqsets < adapter->params.nports) {
		dev_warn(adapter->pdev_dev, "only using %d of %d available"
			 " virtual interfaces (too few Queue Sets)\n",
			 adapter->sge.max_ethqsets, adapter->params.nports);
		adapter->params.nports = adapter->sge.max_ethqsets;
	}
}

/*
 * Perform early "adapter" initialization.  This is where we discover what
 * adapter parameters we're going to be using and initialize basic adapter
 * hardware support.
 */
static int adap_init0(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	struct sge *s = &adapter->sge;
	int err;
	u32 param, val = 0;

	/*
	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
	 * 2.6.31 and later we can't call pci_reset_function() in order to
	 * issue an FLR because of a self-deadlock on the device semaphore.
	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
	 * cases where they're needed -- for instance, some versions of KVM
	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
	 * use the firmware based reset in order to reset any per function
	 * state.
	 */
	err = t4vf_fw_reset(adapter);
	if (err < 0) {
		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
		return err;
	}

	/*
	 * Grab basic operational parameters.  These will predominantly have
	 * been set up by the Physical Function Driver or will be hard coded
	 * into the adapter.  We just have to live with them ...  Note that
	 * we _must_ get our VPD parameters before our SGE parameters because
	 * we need to know the adapter's core clock from the VPD in order to
	 * properly decode the SGE Timer Values.
	 */
	err = t4vf_get_dev_params(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" device parameters: err=%d\n", err);
		return err;
	}
	err = t4vf_get_vpd_params(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" VPD parameters: err=%d\n", err);
		return err;
	}
	err = t4vf_get_sge_params(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" SGE parameters: err=%d\n", err);
		return err;
	}
	err = t4vf_get_rss_glb_config(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" RSS parameters: err=%d\n", err);
		return err;
	}
	if (adapter->params.rss.mode !=
	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
			" mode %d\n", adapter->params.rss.mode);
		return -EINVAL;
	}
	err = t4vf_sge_init(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
			" err=%d\n", err);
		return err;
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
	val = 1;
	(void) t4vf_set_params(adapter, 1, &param, &val);

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	s->timer_val[0] = core_ticks_to_us(adapter,
		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adapter,
		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adapter,
		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adapter,
		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adapter,
		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adapter,
		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));

	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);

	/*
	 * Grab our Virtual Interface resource allocation, extract the
	 * features that we're interested in and do a bit of sanity testing on
	 * what we discover.
	 */
	err = t4vf_get_vfres(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to get virtual interface"
			" resources: err=%d\n", err);
		return err;
	}

	/* Check for various parameter sanity issues */
	if (adapter->params.vfres.pmask == 0) {
		dev_err(adapter->pdev_dev, "no port access configured\n"
			"usable!\n");
		return -EINVAL;
	}
	if (adapter->params.vfres.nvi == 0) {
		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
			"usable!\n");
		return -EINVAL;
	}

	/* Initialize nports and max_ethqsets now that we have our Virtual
	 * Function Resources.
	 */
	size_nports_qsets(adapter);

	return 0;
}

static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
			     u8 pkt_cnt_idx, unsigned int size,
			     unsigned int iqe_size)
{
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     (pkt_cnt_idx < SGE_NCOUNTERS ?
			      QINTR_CNT_EN_F : 0));
	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
			    ? pkt_cnt_idx
			    : 0);
	rspq->iqe_len = iqe_size;
	rspq->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings can
 * be modified by the admin via ethtool and cxgbtool prior to the adapter
 * being brought up for the first time.
 */
static void cfg_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int q10g, n10g, qidx, pidx, qs;
	size_t iqe_size;

	/*
	 * We should not be called till we know how many Queue Sets we can
	 * support.  In particular, this means that we need to know what kind
	 * of interrupts we'll be using ...
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);

	/*
	 * Count the number of 10GbE Virtual Interfaces that we have.
	 */
	n10g = 0;
	for_each_port(adapter, pidx)
		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);

	/*
	 * We default to 1 Queue Set per non-10G port and up to one Queue Set
	 * per CPU core for each 10G port.
	 */
	if (n10g == 0)
		q10g = 0;
	else {
		int n1g = (adapter->params.nports - n10g);
		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
		if (q10g > num_online_cpus())
			q10g = num_online_cpus();
	}
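	/*
	 * For example (hypothetical numbers): with max_ethqsets = 16 and one
	 * 10G plus one 1G port on an 8-CPU system, q10g = (16 - 1) / 1 = 15,
	 * which the clamp above reduces to 8.
	 */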

	/*
	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
	 * The layout will be established in setup_sge_queues() when the
	 * adapter is brought up for the first time.
	 */
	qidx = 0;
	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
	s->ethqsets = qidx;

	/*
	 * The Ingress Queue Entry Size for our various Response Queues needs
	 * to be big enough to accommodate the largest message we can receive
	 * from the chip/firmware; which is 64 bytes ...
	 */
	iqe_size = 64;

	/*
	 * Set up default Queue Set parameters ...  Start off with the
	 * shortest interrupt holdoff timer.
	 */
	for (qs = 0; qs < s->max_ethqsets; qs++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
		struct sge_eth_txq *txq = &s->ethtxq[qs];

		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
		rxq->fl.size = 72;
		txq->q.size = 1024;
	}

	/*
	 * The firmware event queue is used for link state changes and
	 * notifications of TX DMA completions.
	 */
	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);

	/*
	 * The forwarded interrupt queue is used when we're in MSI interrupt
	 * mode.  In this mode all interrupts associated with RX queues will
	 * be forwarded to a single queue which we'll associate with our MSI
	 * interrupt vector.  The messages dropped in the forwarded interrupt
	 * queue will indicate which ingress queue needs servicing ...  This
	 * queue needs to be large enough to accommodate all of the ingress
	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
	 * from equalling the CIDX if every ingress queue has an outstanding
	 * interrupt).  The queue doesn't need to be any larger because no
	 * ingress queue will ever have more than one outstanding interrupt at
	 * any time ...
	 */
	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
		  iqe_size);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adapter, int n)
{
	int i;
	struct port_info *pi;

	/*
	 * While we have too many active Ethernet Queue Sets, iterate across
	 * the
	 * "ports" and reduce their individual Queue Set allocations.
	 */
	BUG_ON(n < adapter->params.nports);
	while (n < adapter->sge.ethqsets)
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adapter->sge.ethqsets--;
				if (adapter->sge.ethqsets <= n)
					break;
			}
		}

	/*
	 * Reassign the starting Queue Sets for each of the "ports" ...
	 */
	n = 0;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/*
 * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
 * need.  Minimally we need one for every Virtual Interface plus those needed
 * for our "extras".  Note that this process may lower the maximum number of
 * allowed Queue Sets ...
 */
static int enable_msix(struct adapter *adapter)
{
	int i, want, need, nqsets;
	struct msix_entry entries[MSIX_ENTRIES];
	struct sge *s = &adapter->sge;

	for (i = 0; i < MSIX_ENTRIES; ++i)
		entries[i].entry = i;

	/*
	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
	 * plus those needed for our "extras" (for example, the firmware
	 * message queue).  We _need_ at least one "Queue Set" per Virtual
	 * Interface plus those needed for our "extras".  So now we get to see
	 * how many MSI-X vectors we can actually get ...
	 */
	want = s->max_ethqsets + MSIX_EXTRAS;
	need = adapter->params.nports + MSIX_EXTRAS;
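	/*
	 * For example (hypothetical numbers): with 16 Queue Sets spread
	 * across 2 ports we ask for 16 + MSIX_EXTRAS vectors but can operate
	 * with as few as 2 + MSIX_EXTRAS; anything in between shrinks
	 * max_ethqsets below.
	 */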

	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
	if (want < 0)
		return want;

	nqsets = want - MSIX_EXTRAS;
	if (nqsets < s->max_ethqsets) {
		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
			 " for %d Queue Sets\n", nqsets);
		s->max_ethqsets = nqsets;
		if (nqsets < s->ethqsets)
			reduce_ethqs(adapter, nqsets);
	}
	for (i = 0; i < want; ++i)
		adapter->msix_info[i].vec = entries[i].vector;

	return 0;
}

static const struct net_device_ops cxgb4vf_netdev_ops	= {
	.ndo_open		= cxgb4vf_open,
	.ndo_stop		= cxgb4vf_stop,
	.ndo_start_xmit		= t4vf_eth_xmit,
	.ndo_get_stats		= cxgb4vf_get_stats,
	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
	.ndo_change_mtu		= cxgb4vf_change_mtu,
	.ndo_fix_features	= cxgb4vf_fix_features,
	.ndo_set_features	= cxgb4vf_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb4vf_poll_controller,
#endif
};

/*
 * "Probe" a device: initialize a device and construct all kernel and driver
 * state needed to manage the device.  This routine is called "init_one" in
 * the PF Driver ...
 */
static int cxgb4vf_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int pci_using_dac;
	int err, pidx;
	unsigned int pmask;
	struct adapter *adapter;
	struct port_info *pi;
	struct net_device *netdev;
	unsigned int pf;

	/*
	 * Print our driver banner the first time we're called to initialize a
	 * device.
	 */
	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);

	/*
	 * Initialize generic PCI device state.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		return err;
	}

	/*
	 * Reserve PCI resources for the device.  If we can't get them some
	 * other driver may have already claimed the device ...
	 */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_disable_device;
	}

	/*
	 * Set up our DMA mask: try for 64-bit address masking first and
	 * fall back to 32-bit if we can't get 64 bits ...
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err == 0) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
				" coherent allocations\n");
			goto err_release_regions;
		}
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err != 0) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto err_release_regions;
		}
		pci_using_dac = 0;
	}

	/*
	 * Enable bus mastering for the device ...
	 */
	pci_set_master(pdev);

	/*
	 * Allocate our adapter data structure and attach it to the device.
	 */
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto err_release_regions;
	}
	pci_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;

	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4VF_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto err_free_adapter;
	}
	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;

	/*
	 * Initialize SMP data synchronization resources.
	 */
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);

	/*
	 * Map our I/O registers in BAR0.
	 */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto err_free_adapter;
	}

	/* Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_prep_adapter(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "device didn't become ready:"
			" err=%d\n", err);
		goto err_unmap_bar0;
	}

	/* For T5 and later we want to use the new BAR-based User Doorbells,
	 * so we need to map BAR2 here ...
	 */
	if (!is_t4(adapter->params.chip)) {
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
			err = -ENOMEM;
			goto err_unmap_bar0;
		}
	}
	/*
	 * Initialize adapter level features.
	 */
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	err = adap_init0(adapter);
	if (err)
		goto err_unmap_bar;

	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);

	/*
	 * Allocate our "adapter ports" and stitch everything together.
	 */
	pmask = adapter->params.vfres.pmask;
	pf = t4vf_get_pf_from_vf(adapter);
	for_each_port(adapter, pidx) {
		int port_id, viid;
		u8 mac[ETH_ALEN];
		unsigned int naddr = 1;

		/*
		 * We simplistically allocate our virtual interfaces
		 * sequentially across the port numbers to which we have
		 * access rights.  This should be configurable in some manner
		 * ...
		 */
		if (pmask == 0)
			break;
		port_id = ffs(pmask) - 1;
		pmask &= ~(1 << port_id);
		viid = t4vf_alloc_vi(adapter, port_id);
		if (viid < 0) {
			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
				" err=%d\n", port_id, viid);
			err = viid;
			goto err_free_dev;
		}

		/*
		 * Allocate our network device and stitch things together.
		 */
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_PORT_QSETS);
		if (netdev == NULL) {
			t4vf_free_vi(adapter, viid);
			err = -ENOMEM;
			goto err_free_dev;
		}
		adapter->port[pidx] = netdev;
		SET_NETDEV_DEV(netdev, &pdev->dev);
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->pidx = pidx;
		pi->port_id = port_id;
		pi->viid = viid;

		/*
		 * Initialize the starting state of our "port" and register
		 * it.
		 */
		pi->xact_addr_filt = -1;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_HIGHDMA;
		netdev->features = netdev->hw_features |
				   NETIF_F_HW_VLAN_CTAG_TX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4vf_netdev_ops;
		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
		netdev->dev_port = pi->port_id;

		/*
		 * Initialize the hardware/software state for the port.
		 */
		err = t4vf_port_init(adapter, pidx);
		if (err) {
			dev_err(&pdev->dev, "cannot initialize port %d\n",
				pidx);
			goto err_free_dev;
		}

		err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
		if (err) {
			dev_err(&pdev->dev,
				"unable to determine MAC ACL address, "
				"continuing anyway.. (status %d)\n", err);
		} else if (naddr && adapter->params.vfres.nvi == 1) {
			struct sockaddr addr;

			ether_addr_copy(addr.sa_data, mac);
			err = cxgb4vf_set_mac_addr(netdev, &addr);
			if (err) {
				dev_err(&pdev->dev,
					"unable to set MAC address %pM\n",
					mac);
				goto err_free_dev;
			}
			dev_info(&pdev->dev,
				 "Using assigned MAC ACL: %pM\n", mac);
		}
	}

	/* See what interrupts we'll be using.  If we've been configured to
	 * use MSI-X interrupts, try to enable them but fall back to using
	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
	 * get MSI interrupts we bail with the error.
	 */
	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else {
		if (msi == MSI_MSIX) {
			dev_info(adapter->pdev_dev,
				 "Unable to use MSI-X Interrupts; falling "
				 "back to MSI Interrupts\n");

			/* We're going to need a Forwarded Interrupt Queue so
			 * that may cut into how many Queue Sets we can
			 * support.
			 */
			msi = MSI_MSI;
			size_nports_qsets(adapter);
		}
		err = pci_enable_msi(pdev);
		if (err) {
			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
				" err=%d\n", err);
			goto err_free_dev;
		}
		adapter->flags |= USING_MSI;
	}

	/* Now that we know how many "ports" we have and what interrupt
	 * mechanism we're going to use, we can configure our queue resources.
	 */
	cfg_queues(adapter);

	/*
	 * The "card" is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole "card" but rather proceed
	 * only with the ports we manage to register successfully.  However we
	 * must register at least one net device.
	 */
	for_each_port(adapter, pidx) {
		struct port_info *pi = netdev_priv(adapter->port[pidx]);
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;

		netif_set_real_num_tx_queues(netdev, pi->nqsets);
		netif_set_real_num_rx_queues(netdev, pi->nqsets);

		err = register_netdev(netdev);
		if (err) {
			dev_warn(&pdev->dev, "cannot register net device %s,"
				 " skipping\n", netdev->name);
			continue;
		}

		set_bit(pidx, &adapter->registered_device_map);
	}
	if (adapter->registered_device_map == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto err_disable_interrupts;
	}

	/*
	 * Set up our debugfs entries.
	 */
	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
		adapter->debugfs_root =
			debugfs_create_dir(pci_name(pdev),
					   cxgb4vf_debugfs_root);
		if (IS_ERR_OR_NULL(adapter->debugfs_root))
			dev_warn(&pdev->dev, "could not create debugfs"
				 " directory");
		else
			setup_debugfs(adapter);
	}

	/*
	 * Print a short notice on the existence and configuration of the new
	 * VF network device ...
	 */
	for_each_port(adapter, pidx) {
		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
			 adapter->port[pidx]->name,
			 (adapter->flags & USING_MSIX) ? "MSI-X" :
			 (adapter->flags & USING_MSI)  ? "MSI" : "");
	}

	/*
	 * Return success!
	 */
	return 0;

	/*
	 * Error recovery and exit code.  Unwind state that's been created
	 * so far and return the error.
	 */
err_disable_interrupts:
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}

err_free_dev:
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;
		pi = netdev_priv(netdev);
		t4vf_free_vi(adapter, pi->viid);
		if (test_bit(pidx, &adapter->registered_device_map))
			unregister_netdev(netdev);
		free_netdev(netdev);
	}

err_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);

err_unmap_bar0:
	iounmap(adapter->regs);

err_free_adapter:
	kfree(adapter->mbox_log);
	kfree(adapter);

err_release_regions:
	pci_release_regions(pdev);
	pci_clear_master(pdev);

err_disable_device:
	pci_disable_device(pdev);

	return err;
}

/*
 * "Remove" a device: tear down all kernel and driver state created in the
 * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
 * that this is called "remove_one" in the PF Driver.)
 */
static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/*
	 * Tear down driver state associated with device.
	 */
	if (adapter) {
		int pidx;

		/*
		 * Stop all of our activity.  Unregister network port,
		 * disable interrupts, etc.
		 */
		for_each_port(adapter, pidx)
			if (test_bit(pidx, &adapter->registered_device_map))
				unregister_netdev(adapter->port[pidx]);
		t4vf_sge_stop(adapter);
		if (adapter->flags & USING_MSIX) {
			pci_disable_msix(adapter->pdev);
			adapter->flags &= ~USING_MSIX;
		} else if (adapter->flags & USING_MSI) {
			pci_disable_msi(adapter->pdev);
			adapter->flags &= ~USING_MSI;
		}

		/*
		 * Tear down our debugfs entries.
		 */
		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
			cleanup_debugfs(adapter);
			debugfs_remove_recursive(adapter->debugfs_root);
		}

		/*
		 * Free all of the various resources which we've acquired ...
		 */
		t4vf_free_sge_resources(adapter);
		for_each_port(adapter, pidx) {
			struct net_device *netdev = adapter->port[pidx];
			struct port_info *pi;

			if (netdev == NULL)
				continue;

			pi = netdev_priv(netdev);
			t4vf_free_vi(adapter, pi->viid);
			free_netdev(netdev);
		}
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		kfree(adapter->mbox_log);
		kfree(adapter);
	}

	/*
	 * Disable the device and release its PCI resources.
	 */
	pci_disable_device(pdev);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
}

/*
 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
 * delivery.
 */
static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
{
	struct adapter *adapter;
	int pidx;

	adapter = pci_get_drvdata(pdev);
	if (!adapter)
		return;

	/* Disable all Virtual Interfaces.  This will shut down the
	 * delivery of all ingress packets into the chip for these
	 * Virtual Interfaces.
	 */
	for_each_port(adapter, pidx)
		if (test_bit(pidx, &adapter->registered_device_map))
			unregister_netdev(adapter->port[pidx]);

	/* Stop all SGE Queue processing to quiesce DMA and Interrupt
	 * activity before the Queues are freed below.
	 */
	t4vf_sge_stop(adapter);
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}

	/*
	 * Free up all Queues which will prevent further DMA and
	 * Interrupts allowing various internal pathways to drain.
	 */
	t4vf_free_sge_resources(adapter);
	pci_set_drvdata(pdev, NULL);
}

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION	0x8

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
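
/*
 * As an illustration (the Device ID is hypothetical),
 * CH_PCI_ID_TABLE_ENTRY(0x5801) expands to
 * { PCI_VDEVICE(CHELSIO, (0x5801)), 0 }.
 */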

#include "../cxgb4/t4_pci_id_tbl.h"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);

static struct pci_driver cxgb4vf_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= cxgb4vf_pci_tbl,
	.probe		= cxgb4vf_pci_probe,
	.remove		= cxgb4vf_pci_remove,
	.shutdown	= cxgb4vf_pci_shutdown,
};

/*
 * Initialize global driver state.
 */
static int __init cxgb4vf_module_init(void)
{
	int ret;

	/*
	 * Vet our module parameters.
	 */
	if (msi != MSI_MSIX && msi != MSI_MSI) {
		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
			msi, MSI_MSIX, MSI_MSI);
		return -EINVAL;
	}

	/* Debugfs support is optional, just warn if this fails */
	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4vf_driver);
	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		debugfs_remove(cxgb4vf_debugfs_root);
	return ret;
}

/*
 * Tear down global driver state.
 */
static void __exit cxgb4vf_module_exit(void)
{
	pci_unregister_driver(&cxgb4vf_driver);
	debugfs_remove(cxgb4vf_debugfs_root);
}

module_init(cxgb4vf_module_init);
module_exit(cxgb4vf_module_exit);