Path: blob/main/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
48775 views
// SPDX-License-Identifier: GPL-2.0-or-later1/*2* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.3* Copyright (C) 2007 The Regents of the University of California.4* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).5* Written by Brian Behlendorf <[email protected]>.6* UCRL-CODE-2351977*8* This file is part of the SPL, Solaris Porting Layer.9*10* The SPL is free software; you can redistribute it and/or modify it11* under the terms of the GNU General Public License as published by the12* Free Software Foundation; either version 2 of the License, or (at your13* option) any later version.14*15* The SPL is distributed in the hope that it will be useful, but WITHOUT16* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or17* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License18* for more details.19*20* You should have received a copy of the GNU General Public License along21* with the SPL. If not, see <http://www.gnu.org/licenses/>.22*23* Solaris Porting Layer (SPL) Generic Implementation.24*/2526#include <sys/isa_defs.h>27#include <sys/sysmacros.h>28#include <sys/systeminfo.h>29#include <sys/vmsystm.h>30#include <sys/kmem.h>31#include <sys/kmem_cache.h>32#include <sys/vmem.h>33#include <sys/mutex.h>34#include <sys/rwlock.h>35#include <sys/taskq.h>36#include <sys/tsd.h>37#include <sys/zmod.h>38#include <sys/debug.h>39#include <sys/proc.h>40#include <sys/kstat.h>41#include <sys/file.h>42#include <sys/sunddi.h>43#include <linux/ctype.h>44#include <sys/disp.h>45#include <sys/random.h>46#include <sys/string.h>47#include <linux/kmod.h>48#include <linux/mod_compat.h>49#include <sys/cred.h>50#include <sys/vnode.h>51#include <sys/misc.h>52#include <linux/mod_compat.h>5354unsigned long spl_hostid = 0;55EXPORT_SYMBOL(spl_hostid);5657module_param(spl_hostid, ulong, 0644);58MODULE_PARM_DESC(spl_hostid, "The system hostid.");5960proc_t p0;61EXPORT_SYMBOL(p0);6263/*64* xoshiro256++ 1.0 PRNG by David Blackman and 
Sebastiano Vigna65*66* "Scrambled Linear Pseudorandom Number Generators∗"67* https://vigna.di.unimi.it/ftp/papers/ScrambledLinear.pdf68*69* random_get_pseudo_bytes() is an API function on Illumos whose sole purpose70* is to provide bytes containing random numbers. It is mapped to /dev/urandom71* on Illumos, which uses a "FIPS 186-2 algorithm". No user of the SPL's72* random_get_pseudo_bytes() needs bytes that are of cryptographic quality, so73* we can implement it using a fast PRNG that we seed using Linux' actual74* equivalent to random_get_pseudo_bytes(). We do this by providing each CPU75* with an independent seed so that all calls to random_get_pseudo_bytes() are76* free of atomic instructions.77*78* A consequence of using a fast PRNG is that using random_get_pseudo_bytes()79* to generate words larger than 256 bits will paradoxically be limited to80* `2^256 - 1` possibilities. This is because we have a sequence of `2^256 - 1`81* 256-bit words and selecting the first will implicitly select the second. If82* a caller finds this behavior undesirable, random_get_bytes() should be used83* instead.84*85* XXX: Linux interrupt handlers that trigger within the critical section86* formed by `s[3] = xp[3];` and `xp[0] = s[0];` and call this function will87* see the same numbers. Nothing in the code currently calls this in an88* interrupt handler, so this is considered to be okay. 
 * If that becomes a problem, we could create a set of per-cpu variables for
 * interrupt handlers and use them when in_interrupt() from
 * linux/preempt_mask.h evaluates to true.
 */
/* Per-CPU 4-word (256-bit) xoshiro256++ state, allocated in spl_random_init */
static void __percpu *spl_pseudo_entropy;

/*
 * rotl()/spl_rand_next()/spl_rand_jump() are copied from the following CC-0
 * licensed file:
 *
 * https://prng.di.unimi.it/xoshiro256plusplus.c
 */

/* Rotate x left by k bits; callers pass 0 < k < 64 so the shifts are defined */
static inline uint64_t rotl(const uint64_t x, int k)
{
	return ((x << k) | (x >> (64 - k)));
}

/*
 * Advance the 256-bit state s[0..3] by one step and return the next 64-bit
 * output word (the xoshiro256++ output function).
 */
static inline uint64_t
spl_rand_next(uint64_t *s)
{
	const uint64_t result = rotl(s[0] + s[3], 23) + s[0];

	const uint64_t t = s[1] << 17;

	s[2] ^= s[0];
	s[3] ^= s[1];
	s[1] ^= s[2];
	s[0] ^= s[3];

	s[2] ^= t;

	s[3] = rotl(s[3], 45);

	return (result);
}

/*
 * Advance the state as if spl_rand_next() had been called a large, fixed
 * number of times (this is the reference implementation's jump() function;
 * its JUMP polynomial skips ahead 2^128 steps — TODO confirm against the
 * reference).  Used to hand each CPU a disjoint subsequence.
 */
static inline void
spl_rand_jump(uint64_t *s)
{
	static const uint64_t JUMP[] = { 0x180ec6d33cfd0aba,
	    0xd5a61266f0c9392c, 0xa9582618e03fc9aa, 0x39abdc4529b1661c };

	uint64_t s0 = 0;
	uint64_t s1 = 0;
	uint64_t s2 = 0;
	uint64_t s3 = 0;
	int i, b;
	for (i = 0; i < sizeof (JUMP) / sizeof (*JUMP); i++)
		for (b = 0; b < 64; b++) {
			if (JUMP[i] & 1ULL << b) {
				s0 ^= s[0];
				s1 ^= s[1];
				s2 ^= s[2];
				s3 ^= s[3];
			}
			(void) spl_rand_next(s);
		}

	s[0] = s0;
	s[1] = s1;
	s[2] = s2;
	s[3] = s3;
}

/*
 * Fill ptr[0..len-1] with pseudo-random bytes drawn from this CPU's private
 * xoshiro256++ state.  NOT cryptographically secure (see block comment
 * above).  Always returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	uint64_t *xp, s[4];

	ASSERT(ptr);

	/* Pin this CPU and take a local copy of its generator state. */
	xp = get_cpu_ptr(spl_pseudo_entropy);

	s[0] = xp[0];
	s[1] = xp[1];
	s[2] = xp[2];
	s[3] = xp[3];

	while (len) {
		union {
			uint64_t ui64;
			uint8_t byte[sizeof (uint64_t)];
		} entropy;
		/* Up to 8 output bytes per generator step. */
		int i = MIN(len, sizeof (uint64_t));

		len -= i;
		entropy.ui64 = spl_rand_next(s);

		/*
		 * xoshiro256++ has low entropy lower bytes, so we copy the
		 * higher order bytes first.
		 */
		while (i--)
#ifdef _ZFS_BIG_ENDIAN
			*ptr++ = entropy.byte[i];
#else
			*ptr++ = entropy.byte[7 - i];
#endif
	}

	/* Write the advanced state back for the next caller on this CPU. */
	xp[0] = s[0];
	xp[1] = s[1];
	xp[2] = s[2];
	xp[3] =
	    s[3];

	/* Re-enable preemption; pairs with get_cpu_ptr() above. */
	put_cpu_ptr(spl_pseudo_entropy);

	return (0);
}

EXPORT_SYMBOL(random_get_pseudo_bytes);

#if BITS_PER_LONG == 32

/*
 * Support 64/64 => 64 division on a 32-bit platform. While the kernel
 * provides a div64_u64() function for this we do not use it because the
 * implementation is flawed. There are cases which return incorrect
 * results as late as linux-2.6.35. Until this is fixed upstream the
 * spl must provide its own implementation.
 *
 * This implementation is a slightly modified version of the algorithm
 * proposed by the book 'Hacker's Delight'. The original source can be
 * found here and is available for use without restriction.
 *
 * http://www.hackersdelight.org/HDcode/newCode/divDouble.c
 */

/*
 * Calculate the number of leading zeros in a 64-bit value.
 */
static int
nlz64(uint64_t x)
{
	register int n = 0;

	if (x == 0)
		return (64);

	/* Binary search: shift x left in halving chunks, counting zeros. */
	if (x <= 0x00000000FFFFFFFFULL) { n = n + 32; x = x << 32; }
	if (x <= 0x0000FFFFFFFFFFFFULL) { n = n + 16; x = x << 16; }
	if (x <= 0x00FFFFFFFFFFFFFFULL) { n = n + 8; x = x << 8; }
	if (x <= 0x0FFFFFFFFFFFFFFFULL) { n = n + 4; x = x << 4; }
	if (x <= 0x3FFFFFFFFFFFFFFFULL) { n = n + 2; x = x << 2; }
	if (x <= 0x7FFFFFFFFFFFFFFFULL) { n = n + 1; }

	return (n);
}

/*
 * Newer kernels have a div_u64() function but we define our own
 * to simplify portability between kernel versions.
 * 64-by-32 unsigned divide; do_div() returns the remainder and
 * stores the quotient in u.
 */
static inline uint64_t
__div_u64(uint64_t u, uint32_t v)
{
	(void) do_div(u, v);
	return (u);
}

/*
 * Turn off the missing prototypes warning for these functions.
 * They are replacements for libgcc-provided functions and will never be
 * called directly.
 */
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-prototypes"
#endif

/*
 * Implementation of 64-bit unsigned division for 32-bit machines.
 *
 * First the procedure takes care of the case in which the divisor is a
 * 32-bit quantity. There are two subcases: (1) If the left half of the
 * dividend is less than the divisor, one execution of do_div() is all that
 * is required (overflow is not possible). (2) Otherwise it does two
 * divisions, using the grade school method.
 */
uint64_t
__udivdi3(uint64_t u, uint64_t v)
{
	uint64_t u0, u1, v1, q0, q1, k;
	int n;

	if (v >> 32 == 0) {			// If v < 2**32:
		if (u >> 32 < v) {		// If u/v cannot overflow,
			return (__div_u64(u, v)); // just do one division.
		} else {			// If u/v would overflow:
			u1 = u >> 32;		// Break u into two halves.
			u0 = u & 0xFFFFFFFF;
			q1 = __div_u64(u1, v);	// First quotient digit.
			k = u1 - q1 * v;	// First remainder, < v.
			u0 += (k << 32);
			q0 = __div_u64(u0, v);	// Second quotient digit.
			return ((q1 << 32) + q0);
		}
	} else {				// If v >= 2**32:
		n = nlz64(v);			// 0 <= n <= 31.
		v1 = (v << n) >> 32;		// Normalize divisor, MSB is 1.
		u1 = u >> 1;			// To ensure no overflow.
		q1 = __div_u64(u1, v1);		// Get quotient from
		q0 = (q1 << n) >> 31;		// Undo normalization and
						// division of u by 2.
		if (q0 != 0)			// Make q0 correct or
			q0 = q0 - 1;		// too small by 1.
		if ((u - q0 * v) >= v)
			q0 = q0 + 1;		// Now q0 is correct.

		return (q0);
	}
}
EXPORT_SYMBOL(__udivdi3);

#ifndef abs64
/* Branch-free |x|: t is all-ones iff x < 0 (arithmetic shift). */
/* CSTYLED */
#define	abs64(x)	({ uint64_t t = (x) >> 63; ((x) ^ t) - t; })
#endif

/*
 * Implementation of 64-bit signed division for 32-bit machines.
 * Divide the magnitudes, then restore the sign of the quotient.
 */
int64_t
__divdi3(int64_t u, int64_t v)
{
	int64_t q, t;
	q = __udivdi3(abs64(u), abs64(v));
	t = (u ^ v) >> 63;	// t = -1 iff u, v have opposite signs.
	return ((q ^ t) - t);	// Negate q when the signs differ.
}
EXPORT_SYMBOL(__divdi3);

/*
 * Implementation of 64-bit unsigned modulo for 32-bit machines.
 */
uint64_t
__umoddi3(uint64_t dividend, uint64_t divisor)
{
	return (dividend - (divisor * __udivdi3(dividend, divisor)));
}
EXPORT_SYMBOL(__umoddi3);

/*
 * 64-bit signed modulo for 32-bit machines.
 * The result carries the sign of the dividend (C truncation semantics).
 */
int64_t
__moddi3(int64_t n, int64_t d)
{
	int64_t q;
	boolean_t nn = B_FALSE;

	if (n < 0) {
		nn = B_TRUE;
		n = -n;
	}
	if (d < 0)
		d = -d;

	q = __umoddi3(n, d);

	return (nn ? -q : q);
}
EXPORT_SYMBOL(__moddi3);

/*
 * Implementation of 64-bit unsigned division/modulo for 32-bit machines.
 * Remainder is stored through r when r is non-NULL.
 */
uint64_t
__udivmoddi4(uint64_t n, uint64_t d, uint64_t *r)
{
	uint64_t q = __udivdi3(n, d);
	if (r)
		*r = n - d * q;
	return (q);
}
EXPORT_SYMBOL(__udivmoddi4);

/*
 * Implementation of 64-bit signed division/modulo for 32-bit machines.
 * Quotient is negative when the operand signs differ; the remainder
 * carries the sign of the dividend.
 */
int64_t
__divmoddi4(int64_t n, int64_t d, int64_t *r)
{
	int64_t q, rr;
	boolean_t nn = B_FALSE;
	boolean_t nd = B_FALSE;
	if (n < 0) {
		nn = B_TRUE;
		n = -n;
	}
	if (d < 0) {
		nd = B_TRUE;
		d = -d;
	}

	q = __udivmoddi4(n, d, (uint64_t *)&rr);

	if (nn != nd)
		q = -q;
	if (nn)
		rr = -rr;
	if (r)
		*r = rr;
	return (q);
}
EXPORT_SYMBOL(__divmoddi4);

#if defined(__arm) || defined(__arm__)
/*
 * Implementation of 64-bit (un)signed division for 32-bit arm machines.
 *
 * Run-time ABI for the ARM Architecture (page 20). A pair of (unsigned)
 * long longs is returned in {{r0, r1}, {r2, r3}}, the quotient in {r0, r1},
 * and the remainder in {r2, r3}. The return type is specifically left
 * set to 'void' to ensure the compiler does not overwrite these registers
 * during the return.
 * All results are in registers as per ABI.
 */
void
__aeabi_uldivmod(uint64_t u, uint64_t v)
{
	uint64_t res;
	uint64_t mod;

	res = __udivdi3(u, v);
	mod = __umoddi3(u, v);
	{
		/* Pin quotient/remainder into r0-r3 as the EABI requires. */
		register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF);
		register uint32_t r1 asm("r1") = (res >> 32);
		register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF);
		register uint32_t r3 asm("r3") = (mod >> 32);

		/* Empty asm keeps the values live in the named registers. */
		asm volatile(""
		    : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) /* output */
		    : "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */

		return; /* r0; */
	}
}
EXPORT_SYMBOL(__aeabi_uldivmod);

void
__aeabi_ldivmod(int64_t u, int64_t v)
{
	int64_t res;
	uint64_t mod;

	res = __divdi3(u, v);
	mod = __umoddi3(u, v);
	{
		/* Same register-pinning trick as __aeabi_uldivmod(). */
		register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF);
		register uint32_t r1 asm("r1") = (res >> 32);
		register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF);
		register uint32_t r3 asm("r3") = (mod >> 32);

		asm volatile(""
		    : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) /* output */
		    : "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */

		return; /* r0; */
	}
}
EXPORT_SYMBOL(__aeabi_ldivmod);
#endif /* __arm || __arm__ */

#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif

#endif /* BITS_PER_LONG */

/*
 * NOTE: The strtoxx behavior is solely based on my reading of the Solaris
 * ddi_strtol(9F) man page. I have not verified the behavior of these
 * functions against their Solaris counterparts.
It is possible that I463* may have misinterpreted the man page or the man page is incorrect.464*/465int ddi_strtol(const char *, char **, int, long *);466int ddi_strtoull(const char *, char **, int, unsigned long long *);467int ddi_strtoll(const char *, char **, int, long long *);468469#define define_ddi_strtox(type, valtype) \470int ddi_strto##type(const char *str, char **endptr, \471int base, valtype *result) \472{ \473valtype last_value, value = 0; \474char *ptr = (char *)str; \475int digit, minus = 0; \476\477while (strchr(" \t\n\r\f", *ptr)) \478++ptr; \479\480if (strlen(ptr) == 0) \481return (EINVAL); \482\483switch (*ptr) { \484case '-': \485minus = 1; \486zfs_fallthrough; \487case '+': \488++ptr; \489break; \490} \491\492/* Auto-detect base based on prefix */ \493if (!base) { \494if (str[0] == '0') { \495if (tolower(str[1]) == 'x' && isxdigit(str[2])) { \496base = 16; /* hex */ \497ptr += 2; \498} else if (str[1] >= '0' && str[1] < '8') { \499base = 8; /* octal */ \500ptr += 1; \501} else { \502return (EINVAL); \503} \504} else { \505base = 10; /* decimal */ \506} \507} \508\509while (1) { \510if (isdigit(*ptr)) \511digit = *ptr - '0'; \512else if (isalpha(*ptr)) \513digit = tolower(*ptr) - 'a' + 10; \514else \515break; \516\517if (digit >= base) \518break; \519\520last_value = value; \521value = value * base + digit; \522if (last_value > value) /* Overflow */ \523return (ERANGE); \524\525ptr++; \526} \527\528*result = minus ? 
	    -value : value;					\
								\
	if (endptr)						\
		*endptr = ptr;					\
								\
	return (0);						\
}							\

define_ddi_strtox(l, long)
define_ddi_strtox(ull, unsigned long long)
define_ddi_strtox(ll, long long)

EXPORT_SYMBOL(ddi_strtol);
EXPORT_SYMBOL(ddi_strtoll);
EXPORT_SYMBOL(ddi_strtoull);

/*
 * Copy len bytes from a user (or, with FKIOCTL, kernel) address 'from'
 * into the kernel buffer 'to'.  Returns 0 on success.
 */
int
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
	/* Fake ioctl() issued by kernel, 'from' is a kernel address */
	if (flags & FKIOCTL) {
		memcpy(to, from, len);
		return (0);
	}

	return (copyin(from, to, len));
}
EXPORT_SYMBOL(ddi_copyin);

/*
 * Post a uevent to userspace whenever a new vdev adds to the pool. It is
 * necessary to sync blkid information with udev, which zed daemon uses
 * during device hotplug to identify the vdev.
 */
void
spl_signal_kobj_evt(struct block_device *bdev)
{
#if defined(HAVE_BDEV_KOBJ) || defined(HAVE_PART_TO_DEV)
#ifdef HAVE_BDEV_KOBJ
	struct kobject *disk_kobj = bdev_kobj(bdev);
#else
	struct kobject *disk_kobj = &part_to_dev(bdev->bd_part)->kobj;
#endif
	if (disk_kobj) {
		int ret = kobject_uevent(disk_kobj, KOBJ_CHANGE);
		if (ret) {
			/* Best effort: log and continue on uevent failure. */
			pr_warn("ZFS: Sending event '%d' to kobject: '%s'"
			    " (%p): failed(ret:%d)\n", KOBJ_CHANGE,
			    kobject_name(disk_kobj), disk_kobj, ret);
		}
	}
#else
	/*
	 * This is encountered if neither bdev_kobj() nor part_to_dev() is
	 * available in the kernel - likely due to an API change that needs
	 * to be chased down.
	 */
#error "Unsupported kernel: unable to get struct kobj from bdev"
#endif
}
EXPORT_SYMBOL(spl_signal_kobj_evt);

/*
 * Copy len bytes from the kernel buffer 'from' to a user (or, with
 * FKIOCTL, kernel) address 'to'.  Returns 0 on success.
 */
int
ddi_copyout(const void *from, void *to, size_t len, int flags)
{
	/* Fake ioctl() issued by kernel, both addresses are in kernel space */
	if (flags & FKIOCTL) {
		memcpy(to, from, len);
		return (0);
	}

	return (copyout(from, to, len));
}
EXPORT_SYMBOL(ddi_copyout);

/*
 * Fetch the basic attributes of an open file into *stat.
 * Returns 0 on success or a positive errno value.
 */
static int
spl_getattr(struct file *filp, struct kstat *stat)
{
	int rc;

	ASSERT(filp);
	ASSERT(stat);

	rc =
	    vfs_getattr(&filp->f_path, stat, STATX_BASIC_STATS,
	    AT_STATX_SYNC_AS_STAT);
	if (rc)
		return (-rc);	/* vfs_getattr() returns a negative errno */

	return (0);
}

/*
 * Read the unique system identifier from the /etc/hostid file.
 *
 * The behavior of /usr/bin/hostid on Linux systems with the
 * regular eglibc and coreutils is:
 *
 * 1. Generate the value if the /etc/hostid file does not exist
 *    or if the /etc/hostid file is less than four bytes in size.
 *
 * 2. If the /etc/hostid file is at least 4 bytes, then return
 *    the first four bytes [0..3] in native endian order.
 *
 * 3. Always ignore bytes [4..] if they exist in the file.
 *
 * Only the first four bytes are significant, even on systems that
 * have a 64-bit word size.
 *
 * See:
 *
 *   eglibc: sysdeps/unix/sysv/linux/gethostid.c
 *   coreutils: src/hostid.c
 *
 * Notes:
 *
 * The /etc/hostid file on Solaris is a text file that often reads:
 *
 *   # DO NOT EDIT
 *   "0123456789"
 *
 * Directly copying this file to Linux results in a constant
 * hostid of 4f442023 because the default comment constitutes
 * the first four bytes of the file.
 *
 */

static char *spl_hostid_path = HW_HOSTID_PATH;
module_param(spl_hostid_path, charp, 0444);
MODULE_PARM_DESC(spl_hostid_path, "The system hostid file (/etc/hostid)");

/*
 * Read the first four bytes of spl_hostid_path into *hostid.
 * Returns 0 on success or a positive ENOENT/EINVAL/EIO on failure.
 */
static int
hostid_read(uint32_t *hostid)
{
	uint64_t size;
	uint32_t value = 0;
	int error;
	loff_t off;
	struct file *filp;
	struct kstat stat;

	filp = filp_open(spl_hostid_path, 0, 0);

	if (IS_ERR(filp))
		return (ENOENT);

	error = spl_getattr(filp, &stat);
	if (error) {
		filp_close(filp, 0);
		return (error);
	}
	size = stat.size;
	/* Reject files shorter than a hostid (4 bytes), like hostid(1). */
	// cppcheck-suppress sizeofwithnumericparameter
	if (size < sizeof (HW_HOSTID_MASK)) {
		filp_close(filp, 0);
		return (EINVAL);
	}

	off = 0;
	/*
	 * Read directly into the variable like eglibc does.
	 * Short reads are okay; native behavior is preserved.
	 */
	error = kernel_read(filp, &value, sizeof (value), &off);
	if
(error < 0) {691filp_close(filp, 0);692return (EIO);693}694695/* Mask down to 32 bits like coreutils does. */696*hostid = (value & HW_HOSTID_MASK);697filp_close(filp, 0);698699return (0);700}701702/*703* Return the system hostid. Preferentially use the spl_hostid module option704* when set, otherwise use the value in the /etc/hostid file.705*/706uint32_t707zone_get_hostid(void *zone)708{709uint32_t hostid;710711ASSERT0P(zone);712713if (spl_hostid != 0)714return ((uint32_t)(spl_hostid & HW_HOSTID_MASK));715716if (hostid_read(&hostid) == 0)717return (hostid);718719return (0);720}721EXPORT_SYMBOL(zone_get_hostid);722723static int724spl_kvmem_init(void)725{726int rc = 0;727728rc = spl_kmem_init();729if (rc)730return (rc);731732rc = spl_vmem_init();733if (rc) {734spl_kmem_fini();735return (rc);736}737738return (rc);739}740741/*742* We initialize the random number generator with 128 bits of entropy from the743* system random number generator. In the improbable case that we have a zero744* seed, we fallback to the system jiffies, unless it is also zero, in which745* situation we use a preprogrammed seed. We step forward by 2^64 iterations to746* initialize each of the per-cpu seeds so that the sequences generated on each747* CPU are guaranteed to never overlap in practice.748*/749static int __init750spl_random_init(void)751{752uint64_t s[4];753int i = 0;754755spl_pseudo_entropy = __alloc_percpu(4 * sizeof (uint64_t),756sizeof (uint64_t));757758if (!spl_pseudo_entropy)759return (-ENOMEM);760761get_random_bytes(s, sizeof (s));762763if (s[0] == 0 && s[1] == 0 && s[2] == 0 && s[3] == 0) {764if (jiffies != 0) {765s[0] = jiffies;766s[1] = ~0 - jiffies;767s[2] = ~jiffies;768s[3] = jiffies - ~0;769} else {770(void) memcpy(s, "improbable seed", 16);771}772printk("SPL: get_random_bytes() returned 0 "773"when generating random seed. 
Setting initial seed to "774"0x%016llx%016llx%016llx%016llx.\n", cpu_to_be64(s[0]),775cpu_to_be64(s[1]), cpu_to_be64(s[2]), cpu_to_be64(s[3]));776}777778for_each_possible_cpu(i) {779uint64_t *wordp = per_cpu_ptr(spl_pseudo_entropy, i);780781spl_rand_jump(s);782783wordp[0] = s[0];784wordp[1] = s[1];785wordp[2] = s[2];786wordp[3] = s[3];787}788789return (0);790}791792static void793spl_random_fini(void)794{795free_percpu(spl_pseudo_entropy);796}797798static void799spl_kvmem_fini(void)800{801spl_vmem_fini();802spl_kmem_fini();803}804805static int __init806spl_init(void)807{808int rc = 0;809810if ((rc = spl_random_init()))811goto out0;812813if ((rc = spl_kvmem_init()))814goto out1;815816if ((rc = spl_tsd_init()))817goto out2;818819if ((rc = spl_proc_init()))820goto out3;821822if ((rc = spl_kstat_init()))823goto out4;824825if ((rc = spl_taskq_init()))826goto out5;827828if ((rc = spl_kmem_cache_init()))829goto out6;830831if ((rc = spl_zlib_init()))832goto out7;833834if ((rc = spl_zone_init()))835goto out8;836837return (rc);838839out8:840spl_zlib_fini();841out7:842spl_kmem_cache_fini();843out6:844spl_taskq_fini();845out5:846spl_kstat_fini();847out4:848spl_proc_fini();849out3:850spl_tsd_fini();851out2:852spl_kvmem_fini();853out1:854spl_random_fini();855out0:856return (rc);857}858859static void __exit860spl_fini(void)861{862spl_zone_fini();863spl_zlib_fini();864spl_kmem_cache_fini();865spl_taskq_fini();866spl_kstat_fini();867spl_proc_fini();868spl_tsd_fini();869spl_kvmem_fini();870spl_random_fini();871}872873module_init(spl_init);874module_exit(spl_fini);875876MODULE_DESCRIPTION("Solaris Porting Layer");877MODULE_AUTHOR(ZFS_META_AUTHOR);878MODULE_LICENSE("GPL");879MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);880881882