@jclulow
Created November 25, 2011 05:54
fixer script: a patch and the ksh script that produced it, rewriting sun.com/msg knowledge-article URLs to their illumos.org/msg equivalents across the illumos gate
diff --git a/usr/src/cmd/boot/bootadm/bootadm_upgrade.c b/usr/src/cmd/boot/bootadm/bootadm_upgrade.c
index ddf28c4..61ac69f 100644
--- a/usr/src/cmd/boot/bootadm/bootadm_upgrade.c
+++ b/usr/src/cmd/boot/bootadm/bootadm_upgrade.c
@@ -238,8 +238,8 @@ is_bfu_system(const char *root)
}
#define MENU_URL(root) (is_bfu_system(root) ? \
- "http://www.sun.com/msg/SUNOS-8000-CF" : \
- "http://www.sun.com/msg/SUNOS-8000-AK")
+ "https://www.illumos.org/msg/SUNOS-8000-CF" : \
+ "https://www.illumos.org/msg/SUNOS-8000-AK")
/*
* Simply allocate a new line and copy in cmd + sep + arg
diff --git a/usr/src/cmd/fm/dicts/ZFS.po b/usr/src/cmd/fm/dicts/ZFS.po
index c88b851..243c34d 100644
--- a/usr/src/cmd/fm/dicts/ZFS.po
+++ b/usr/src/cmd/fm/dicts/ZFS.po
@@ -55,7 +55,7 @@ msgstr "A hot spare will be activated if available."
msgid "ZFS-8000-2Q.impact"
msgstr "The pool is no longer providing the configured level of\n replication."
msgid "ZFS-8000-2Q.action"
-msgstr "\nFor an active pool\n\nIf this error was encountered while running 'zpool import', please see\nthe section below. Otherwise, run 'zpool status -x' to determine which pool has\nexperienced a failure:\n\n\n# zpool status -x\n pool: test\n state: DEGRADED\nstatus: One or more devices could not be opened. Sufficient replicas exist for\n the pool to continue functioning in a degraded state.\naction: Attach the missing device and online it using 'zpool online'.\n see: http://www.sun.com/msg/ZFS-8000-2Q\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test DEGRADED 0 0 0\n mirror DEGRADED 0 0 0\n c0t0d0 ONLINE 0 0 0\n c0t0d1 FAULTED 0 0 0 cannot open\n\nerrors: No known data errors\n\n\nDetermine which device failed to open by looking for a FAULTED device\nwith an additional 'cannot open' message. If this device has been inadvertently\nremoved from the system, attach the device and bring it online with 'zpool\nonline':\n\n\n# zpool online test c0t0d1\n\n\nIf the device is no longer available, the device can be replaced using\nthe 'zpool replace' command:\n\n\n# zpool replace test c0t0d1 c0t0d2\n\n\nIf the device has been replaced by another disk in the same physical\nslot, then the device can be replaced using a single argument to the 'zpool\nreplace' command:\n\n\n# zpool replace test c0t0d1\n\n\nExisting data will be resilvered to the new device. Once the\nresilvering completes, the device will be removed from the pool.\n\nFor an exported pool\n\nIf this error is encountered during a 'zpool import', it means that one\nof the devices is not attached to the system:\n\n\n# zpool import\n pool: test\n id: 10121266328238932306\n state: DEGRADED\nstatus: One or more devices are missing from the system.\naction: The pool can be imported despite missing or damaged devices. The\n fault tolerance of the pool may be compromised if imported.\n see: http://www.sun.com/msg/ZFS-8000-2Q\nconfig:\n\n test DEGRADED\n mirror DEGRADED\n c0t0d0 ONLINE\n c0t0d1 FAULTED cannot open\n\n\nUnlike when the pool is active on the system, the device cannot be\nreplaced while the pool is exported. If the device can be attached to the\nsystem, attach the device and run 'zpool import' again.\n\nAlternatively, the pool can be imported as-is, though it will be placed\nin the DEGRADED state due to a missing device. The device will be marked as\nUNAVAIL. Once the pool has been imported, the missing device can be replaced as\ndescribed above.\n "
+msgstr "\nFor an active pool\n\nIf this error was encountered while running 'zpool import', please see\nthe section below. Otherwise, run 'zpool status -x' to determine which pool has\nexperienced a failure:\n\n\n# zpool status -x\n pool: test\n state: DEGRADED\nstatus: One or more devices could not be opened. Sufficient replicas exist for\n the pool to continue functioning in a degraded state.\naction: Attach the missing device and online it using 'zpool online'.\n see: https://www.illumos.org/msg/ZFS-8000-2Q\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test DEGRADED 0 0 0\n mirror DEGRADED 0 0 0\n c0t0d0 ONLINE 0 0 0\n c0t0d1 FAULTED 0 0 0 cannot open\n\nerrors: No known data errors\n\n\nDetermine which device failed to open by looking for a FAULTED device\nwith an additional 'cannot open' message. If this device has been inadvertently\nremoved from the system, attach the device and bring it online with 'zpool\nonline':\n\n\n# zpool online test c0t0d1\n\n\nIf the device is no longer available, the device can be replaced using\nthe 'zpool replace' command:\n\n\n# zpool replace test c0t0d1 c0t0d2\n\n\nIf the device has been replaced by another disk in the same physical\nslot, then the device can be replaced using a single argument to the 'zpool\nreplace' command:\n\n\n# zpool replace test c0t0d1\n\n\nExisting data will be resilvered to the new device. Once the\nresilvering completes, the device will be removed from the pool.\n\nFor an exported pool\n\nIf this error is encountered during a 'zpool import', it means that one\nof the devices is not attached to the system:\n\n\n# zpool import\n pool: test\n id: 10121266328238932306\n state: DEGRADED\nstatus: One or more devices are missing from the system.\naction: The pool can be imported despite missing or damaged devices. The\n fault tolerance of the pool may be compromised if imported.\n see: https://www.illumos.org/msg/ZFS-8000-2Q\nconfig:\n\n test DEGRADED\n mirror DEGRADED\n c0t0d0 ONLINE\n c0t0d1 FAULTED cannot open\n\n\nUnlike when the pool is active on the system, the device cannot be\nreplaced while the pool is exported. If the device can be attached to the\nsystem, attach the device and run 'zpool import' again.\n\nAlternatively, the pool can be imported as-is, though it will be placed\nin the DEGRADED state due to a missing device. The device will be marked as\nUNAVAIL. Once the pool has been imported, the missing device can be replaced as\ndescribed above.\n "
#
# code: ZFS-8000-3C
# keys: ereport.fs.zfs.device.missing_nr
@@ -71,7 +71,7 @@ msgstr "No automated response will be taken."
msgid "ZFS-8000-3C.impact"
msgstr "The pool is no longer available"
msgid "ZFS-8000-3C.action"
-msgstr "\nFor an active pool\n\nIf this error was encountered while running 'zpool import', please see\nthe section below. Otherwise, run 'zpool status -x' to determine which pool\nhas experienced a failure:\n\n\n# zpool status -x\n pool: test\n state: FAULTED\nstatus: One or more devices could not be opened. There are insufficient\n replicas for the pool to continue functioning.\naction: Attach the missing device and online it using 'zpool online'.\n see: http://www.sun.com/msg/ZFS-8000-3C\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test FAULTED 0 0 0 insufficient replicas\n c0t0d0 ONLINE 0 0 0\n c0t0d1 FAULTED 0 0 0 cannot open\n\nerrors: No known data errors\n\n\nIf the device has been temporarily detached from the system, attach the\ndevice to the system and run 'zpool status' again. The pool should\nautomatically detect the newly attached device and resume functioning. You may\nhave to mount the filesystems in the pool explicitly using 'zfs\nmount -a'.\n\nIf the device is no longer available and cannot be reattached to the\nsystem, then the pool must be destroyed and re-created from a backup\nsource.\n\nFor an exported pool\n\nIf this error is encountered during a 'zpool import', it means that one\nof the devices is not attached to the system:\n\n\n# zpool import\n pool: test\n id: 10121266328238932306\n state: FAULTED\nstatus: One or more devices are missing from the system.\naction: The pool cannot be imported. Attach the missing devices and\n try again.\n see: http://www.sun.com/msg/ZFS-8000-3C\nconfig:\n\n test FAULTED insufficient replicas\n c0t0d0 ONLINE\n c0t0d1 FAULTED cannot open\n\n\nThe pool cannot be imported until the missing device is attached to the\nsystem. If the device has been made available in an alternate location, use the\n'-d' option to 'zpool import' to search for devices in a different directory.\nIf the missing device is unavailable, then the pool cannot be imported.\n "
+msgstr "\nFor an active pool\n\nIf this error was encountered while running 'zpool import', please see\nthe section below. Otherwise, run 'zpool status -x' to determine which pool\nhas experienced a failure:\n\n\n# zpool status -x\n pool: test\n state: FAULTED\nstatus: One or more devices could not be opened. There are insufficient\n replicas for the pool to continue functioning.\naction: Attach the missing device and online it using 'zpool online'.\n see: https://www.illumos.org/msg/ZFS-8000-3C\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test FAULTED 0 0 0 insufficient replicas\n c0t0d0 ONLINE 0 0 0\n c0t0d1 FAULTED 0 0 0 cannot open\n\nerrors: No known data errors\n\n\nIf the device has been temporarily detached from the system, attach the\ndevice to the system and run 'zpool status' again. The pool should\nautomatically detect the newly attached device and resume functioning. You may\nhave to mount the filesystems in the pool explicitly using 'zfs\nmount -a'.\n\nIf the device is no longer available and cannot be reattached to the\nsystem, then the pool must be destroyed and re-created from a backup\nsource.\n\nFor an exported pool\n\nIf this error is encountered during a 'zpool import', it means that one\nof the devices is not attached to the system:\n\n\n# zpool import\n pool: test\n id: 10121266328238932306\n state: FAULTED\nstatus: One or more devices are missing from the system.\naction: The pool cannot be imported. Attach the missing devices and\n try again.\n see: https://www.illumos.org/msg/ZFS-8000-3C\nconfig:\n\n test FAULTED insufficient replicas\n c0t0d0 ONLINE\n c0t0d1 FAULTED cannot open\n\n\nThe pool cannot be imported until the missing device is attached to the\nsystem. If the device has been made available in an alternate location, use the\n'-d' option to 'zpool import' to search for devices in a different directory.\nIf the missing device is unavailable, then the pool cannot be imported.\n "
#
# code: ZFS-8000-4J
# keys: ereport.fs.zfs.device.corrupt_label_r
@@ -87,7 +87,7 @@ msgstr "A hot spare will be activated if available."
msgid "ZFS-8000-4J.impact"
msgstr "The pool is no longer providing the configured level of\n replication."
msgid "ZFS-8000-4J.action"
-msgstr "\nFor an active pool\n\nIf this error was encountered while running 'zpool import', please see\nthe section below. Otherwise, run 'zpool status -x' to determine which pool\nhas experienced a failure:\n\n\n\n# zpool status -x\n pool: test\n state: DEGRADED\nstatus: One or more devices could not be used because the label is missing or\n invalid. Sufficient replicas exist for the pool to continue\n functioning in a degraded state.\naction: Replace the device using 'zpool replace'.\n see: http://www.sun.com/msg/ZFS-8000-4J\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test DEGRADED 0 0 0\n mirror DEGRADED 0 0 0\n c0t0d0 ONLINE 0 0 0\n c0t0d1 FAULTED 0 0 0 corrupted data\n\nerrors: No known data errors\n\n\nIf the device has been temporarily detached from the system, attach the\ndevice to the system and run 'zpool status' again. The pool should\nautomatically detect the newly attached device and resume functioning.\n\nIf the device is no longer available, it can be replaced using 'zpool\nreplace':\n\n\n# zpool replace test c0t0d1 c0t0d2\n\n\nIf the device has been replaced by another disk in the same physical\nslot, then the device can be replaced using a single argument to the 'zpool\nreplace' command:\n\n\n# zpool replace test c0t0d1\n\n\nZFS will begin migrating data to the new device as soon as the replace\nis issued. Once the resilvering completes, the original device (if different\nfrom the replacement) will be removed, and the pool will be restored to the\nONLINE state.\n\nFor an exported pool\n\nIf this error is encountered while running 'zpool import', the pool can\nbe still be imported despite the failure:\n\n\n# zpool import\n pool: test\n id: 5187963178597328409\n state: DEGRADED\nstatus: One or more devices contains corrupted data. The fault tolerance of\n the pool may be compromised if imported.\naction: The pool can be imported using its name or numeric identifier.\n see: http://www.sun.com/msg/ZFS-8000-4J\nconfig:\n\n test DEGRADED\n mirror DEGRADED\n c0t0d0 ONLINE\n c0t0d1 FAULTED corrupted data\n\n\nTo import the pool, run 'zpool import':\n\n\n# zpool import test\n\n\nOnce the pool has been imported, the damaged device can be replaced\naccording to the above procedure.\n "
+msgstr "\nFor an active pool\n\nIf this error was encountered while running 'zpool import', please see\nthe section below. Otherwise, run 'zpool status -x' to determine which pool\nhas experienced a failure:\n\n\n\n# zpool status -x\n pool: test\n state: DEGRADED\nstatus: One or more devices could not be used because the label is missing or\n invalid. Sufficient replicas exist for the pool to continue\n functioning in a degraded state.\naction: Replace the device using 'zpool replace'.\n see: https://www.illumos.org/msg/ZFS-8000-4J\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test DEGRADED 0 0 0\n mirror DEGRADED 0 0 0\n c0t0d0 ONLINE 0 0 0\n c0t0d1 FAULTED 0 0 0 corrupted data\n\nerrors: No known data errors\n\n\nIf the device has been temporarily detached from the system, attach the\ndevice to the system and run 'zpool status' again. The pool should\nautomatically detect the newly attached device and resume functioning.\n\nIf the device is no longer available, it can be replaced using 'zpool\nreplace':\n\n\n# zpool replace test c0t0d1 c0t0d2\n\n\nIf the device has been replaced by another disk in the same physical\nslot, then the device can be replaced using a single argument to the 'zpool\nreplace' command:\n\n\n# zpool replace test c0t0d1\n\n\nZFS will begin migrating data to the new device as soon as the replace\nis issued. Once the resilvering completes, the original device (if different\nfrom the replacement) will be removed, and the pool will be restored to the\nONLINE state.\n\nFor an exported pool\n\nIf this error is encountered while running 'zpool import', the pool can\nbe still be imported despite the failure:\n\n\n# zpool import\n pool: test\n id: 5187963178597328409\n state: DEGRADED\nstatus: One or more devices contains corrupted data. The fault tolerance of\n the pool may be compromised if imported.\naction: The pool can be imported using its name or numeric identifier.\n see: https://www.illumos.org/msg/ZFS-8000-4J\nconfig:\n\n test DEGRADED\n mirror DEGRADED\n c0t0d0 ONLINE\n c0t0d1 FAULTED corrupted data\n\n\nTo import the pool, run 'zpool import':\n\n\n# zpool import test\n\n\nOnce the pool has been imported, the damaged device can be replaced\naccording to the above procedure.\n "
#
# code: ZFS-8000-5E
# keys: ereport.fs.zfs.device.corrupt_label_nr
@@ -103,7 +103,7 @@ msgstr "No automated response will be taken."
msgid "ZFS-8000-5E.impact"
msgstr "The pool is no longer available"
msgid "ZFS-8000-5E.action"
-msgstr "\nFor an active pool\n\nIf this error was encountered while running 'zpool import', please see\nthe section below. Otherwise, run 'zpool status -x' to determine which pool\nhas experienced a failure:\n\n\n# zpool status -x\n pool: test\n state: FAULTED\nstatus: One or more devices could not be used because the the label is missing \n or invalid. There are insufficient replicas for the pool to continue\n functioning.\naction: Destroy and re-create the pool from a backup source.\n see: http://www.sun.com/msg/ZFS-8000-5E\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test FAULTED 0 0 0 insufficient replicas\n c0t0d0 FAULTED 0 0 0 corrupted data\n c0t0d1 ONLINE 0 0 0\n\nerrors: No known data errors\n\n\nThe device listed as FAULTED with 'corrupted data' cannot be opened due\nto a corrupt label. ZFS will be unable to use the pool, and all data within the\npool is irrevocably lost. The pool must be destroyed and recreated from an\nappropriate backup source. Using replicated configurations will prevent this\nfrom happening in the future.\n\nFor an exported pool\n\nIf this error is encountered during 'zpool import', the action is the\nsame. The pool cannot be imported - all data is lost and must be restored from\nan appropriate backup source.\n "
+msgstr "\nFor an active pool\n\nIf this error was encountered while running 'zpool import', please see\nthe section below. Otherwise, run 'zpool status -x' to determine which pool\nhas experienced a failure:\n\n\n# zpool status -x\n pool: test\n state: FAULTED\nstatus: One or more devices could not be used because the the label is missing \n or invalid. There are insufficient replicas for the pool to continue\n functioning.\naction: Destroy and re-create the pool from a backup source.\n see: https://www.illumos.org/msg/ZFS-8000-5E\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test FAULTED 0 0 0 insufficient replicas\n c0t0d0 FAULTED 0 0 0 corrupted data\n c0t0d1 ONLINE 0 0 0\n\nerrors: No known data errors\n\n\nThe device listed as FAULTED with 'corrupted data' cannot be opened due\nto a corrupt label. ZFS will be unable to use the pool, and all data within the\npool is irrevocably lost. The pool must be destroyed and recreated from an\nappropriate backup source. Using replicated configurations will prevent this\nfrom happening in the future.\n\nFor an exported pool\n\nIf this error is encountered during 'zpool import', the action is the\nsame. The pool cannot be imported - all data is lost and must be restored from\nan appropriate backup source.\n "
#
# code: ZFS-8000-6X
# keys: ereport.fs.zfs.pool.bad_guid_sum
@@ -119,7 +119,7 @@ msgstr "No automated response will be taken."
msgid "ZFS-8000-6X.impact"
msgstr "The pool cannot be imported"
msgid "ZFS-8000-6X.action"
-msgstr "\nRun 'zpool import' to list which pool cannot be imported:\n\n\n# zpool import\n pool: test\n id: 13783646421373024673\n state: FAULTED\nstatus: One or more devices are missing from the system.\naction: The pool cannot be imported. Attach the missing\n devices and try again.\n see: http://www.sun.com/msg/ZFS-8000-6X\nconfig:\n\n test FAULTED missing device\n c0t0d0 ONLINE\n\n Additional devices are known to be part of this pool, though their\n exact configuration cannot be determined.\n\n\nZFS attempts to store enough configuration data on the devices such\nthat the configuration is recoverable from any subset of devices. In some\ncases, particularly when an entire toplevel virtual device is not attached to\nthe system, ZFS will be unable to determine the complete configuration. It will\nalways detect that these devices are missing, even if it cannot identify all of\nthe devices.\n\nThe pool cannot be imported until the unknown missing device is\nattached to the system. If the device has been made available in an alternate\nlocation, use the '-d' option to 'zpool import' to search for devices in a\ndifferent directory. If the missing device is unavailable, then the pool cannot\nbe imported.\n "
+msgstr "\nRun 'zpool import' to list which pool cannot be imported:\n\n\n# zpool import\n pool: test\n id: 13783646421373024673\n state: FAULTED\nstatus: One or more devices are missing from the system.\naction: The pool cannot be imported. Attach the missing\n devices and try again.\n see: https://www.illumos.org/msg/ZFS-8000-6X\nconfig:\n\n test FAULTED missing device\n c0t0d0 ONLINE\n\n Additional devices are known to be part of this pool, though their\n exact configuration cannot be determined.\n\n\nZFS attempts to store enough configuration data on the devices such\nthat the configuration is recoverable from any subset of devices. In some\ncases, particularly when an entire toplevel virtual device is not attached to\nthe system, ZFS will be unable to determine the complete configuration. It will\nalways detect that these devices are missing, even if it cannot identify all of\nthe devices.\n\nThe pool cannot be imported until the unknown missing device is\nattached to the system. If the device has been made available in an alternate\nlocation, use the '-d' option to 'zpool import' to search for devices in a\ndifferent directory. If the missing device is unavailable, then the pool cannot\nbe imported.\n "
#
# code: ZFS-8000-72
# keys: ereport.fs.zfs.pool.corrupt_pool
@@ -135,7 +135,7 @@ msgstr "No automated response will be taken."
msgid "ZFS-8000-72.impact"
msgstr "The pool is no longer available"
msgid "ZFS-8000-72.action"
-msgstr "\nEven though all the devices are available, the on-disk data\nhas been corrupted such that the pool cannot be opened. If a recovery\naction is presented, the pool can be returned to a usable state.\nOtherwise, all data within the pool is lost, and the pool must be\ndestroyed and restored from an appropriate backup source. ZFS\nincludes built-in metadata replication to prevent this from happening\neven for unreplicated pools, but running in a replicated configuration\nwill decrease the chances of this happening in the future.\n\nIf this error is encountered during 'zpool import', see the\nsection below. Otherwise, run 'zpool status -x' to determine which\npool is faulted and if a recovery option is available:\n\n\n# zpool status -x\n pool: test\n id: 13783646421373024673\n state: FAULTED\nstatus: The pool metadata is corrupted and cannot be opened.\naction: Recovery is possible, but will result in some data loss.\n Returning the pool to its state as of Mon Sep 28 10:24:39 2009\n should correct the problem. Approximately 59 seconds of data\n will have to be discarded, irreversibly. Recovery can be\n attempted by executing 'zpool clear -F test'. A scrub of the pool\n is strongly recommended following a successful recovery.\n see: http://www.sun.com/msg/ZFS-8000-72\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test FAULTED 0 0 2 corrupted data\n c0t0d0 ONLINE 0 0 2\n c0t0d1 ONLINE 0 0 2\n\n\nIf recovery is unavailable, the recommended action will be:\n\n\naction: Destroy the pool and restore from backup.\n\n\nIf this error is encountered during 'zpool import', and if no\nrecovery option is mentioned, the pool is unrecoverable and cannot be\nimported. The pool must be restored from an appropriate backup\nsource. If a recovery option is available, the output from 'zpool\nimport' will look something like the following:\n\n\n# zpool import share\ncannot import 'share': I/O error\n Recovery is possible, but will result in some data loss.\n Returning the pool to its state as of Sun Sep 27 12:31:07 2009\n should correct the problem. Approximately 53 seconds of data\n will have to be discarded, irreversibly. Recovery can be\n attempted by executing 'zpool import -F share'. A scrub of the pool\n is strongly recommended following a successful recovery.\n\n\nRecovery actions are requested with the -F option to either\n'zpool clear' or 'zpool import'. Recovery will result in some data\nloss, because it reverts the pool to an earlier state. A dry-run\nrecovery check can be performed by adding the -n option, affirming if\nrecovery is possible without actually reverting the pool to its\nearlier state.\n"
+msgstr "\nEven though all the devices are available, the on-disk data\nhas been corrupted such that the pool cannot be opened. If a recovery\naction is presented, the pool can be returned to a usable state.\nOtherwise, all data within the pool is lost, and the pool must be\ndestroyed and restored from an appropriate backup source. ZFS\nincludes built-in metadata replication to prevent this from happening\neven for unreplicated pools, but running in a replicated configuration\nwill decrease the chances of this happening in the future.\n\nIf this error is encountered during 'zpool import', see the\nsection below. Otherwise, run 'zpool status -x' to determine which\npool is faulted and if a recovery option is available:\n\n\n# zpool status -x\n pool: test\n id: 13783646421373024673\n state: FAULTED\nstatus: The pool metadata is corrupted and cannot be opened.\naction: Recovery is possible, but will result in some data loss.\n Returning the pool to its state as of Mon Sep 28 10:24:39 2009\n should correct the problem. Approximately 59 seconds of data\n will have to be discarded, irreversibly. Recovery can be\n attempted by executing 'zpool clear -F test'. A scrub of the pool\n is strongly recommended following a successful recovery.\n see: https://www.illumos.org/msg/ZFS-8000-72\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test FAULTED 0 0 2 corrupted data\n c0t0d0 ONLINE 0 0 2\n c0t0d1 ONLINE 0 0 2\n\n\nIf recovery is unavailable, the recommended action will be:\n\n\naction: Destroy the pool and restore from backup.\n\n\nIf this error is encountered during 'zpool import', and if no\nrecovery option is mentioned, the pool is unrecoverable and cannot be\nimported. The pool must be restored from an appropriate backup\nsource. If a recovery option is available, the output from 'zpool\nimport' will look something like the following:\n\n\n# zpool import share\ncannot import 'share': I/O error\n Recovery is possible, but will result in some data loss.\n Returning the pool to its state as of Sun Sep 27 12:31:07 2009\n should correct the problem. Approximately 53 seconds of data\n will have to be discarded, irreversibly. Recovery can be\n attempted by executing 'zpool import -F share'. A scrub of the pool\n is strongly recommended following a successful recovery.\n\n\nRecovery actions are requested with the -F option to either\n'zpool clear' or 'zpool import'. Recovery will result in some data\nloss, because it reverts the pool to an earlier state. A dry-run\nrecovery check can be performed by adding the -n option, affirming if\nrecovery is possible without actually reverting the pool to its\nearlier state.\n"
#
# code: ZFS-8000-8A
# keys: ereport.fs.zfs.object.corrupt_data
@@ -151,7 +151,7 @@ msgstr "No automated response will be taken."
msgid "ZFS-8000-8A.impact"
msgstr "The file or directory is unavailable."
msgid "ZFS-8000-8A.action"
-msgstr "\nRun 'zpool status -x' to determine which pool is damaged:\n\n\n# zpool status -x\n pool: test\n state: ONLINE\nstatus: One or more devices has experienced an error and no valid replicas\n are available. Some filesystem data is corrupt, and applications\n may have been affected.\naction: Destroy the pool and restore from backup.\n see: http://www.sun.com/msg/ZFS-8000-8A\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test ONLINE 0 0 2\n c0t0d0 ONLINE 0 0 2\n c0t0d1 ONLINE 0 0 0\n\nerrors: 1 data errors, use '-v' for a list\n\n\nUnfortunately, the data cannot be repaired, and the only choice to\nrepair the data is to restore the pool from backup. Applications attempting to\naccess the corrupted data will get an error (EIO), and data may be permanently\nlost.\n\nOn recent versions of Solaris, the list of affected files can be\nretrieved by using the '-v' option to 'zpool status':\n\n\n# zpool status -xv\n pool: test\n state: ONLINE\nstatus: One or more devices has experienced an error and no valid replicas\n are available. Some filesystem data is corrupt, and applications\n may have been affected.\naction: Destroy the pool and restore from backup.\n see: http://www.sun.com/msg/ZFS-8000-8A\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test ONLINE 0 0 2\n c0t0d0 ONLINE 0 0 2\n c0t0d1 ONLINE 0 0 0\n\nerrors: Permanent errors have been detected in the following files:\n\n /export/example/foo\n\n\nDamaged files may or may not be able to be removed depending on the\ntype of corruption. If the corruption is within the plain data, the file should\nbe removable. If the corruption is in the file metadata, then the file cannot\nbe removed, though it can be moved to an alternate location. In either case,\nthe data should be restored from a backup source. It is also possible for the\ncorruption to be within pool-wide metadata, resulting in entire datasets being\nunavailable. If this is the case, the only option is to destroy the pool and\nre-create the datasets from backup.\n "
+msgstr "\nRun 'zpool status -x' to determine which pool is damaged:\n\n\n# zpool status -x\n pool: test\n state: ONLINE\nstatus: One or more devices has experienced an error and no valid replicas\n are available. Some filesystem data is corrupt, and applications\n may have been affected.\naction: Destroy the pool and restore from backup.\n see: https://www.illumos.org/msg/ZFS-8000-8A\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test ONLINE 0 0 2\n c0t0d0 ONLINE 0 0 2\n c0t0d1 ONLINE 0 0 0\n\nerrors: 1 data errors, use '-v' for a list\n\n\nUnfortunately, the data cannot be repaired, and the only choice to\nrepair the data is to restore the pool from backup. Applications attempting to\naccess the corrupted data will get an error (EIO), and data may be permanently\nlost.\n\nOn recent versions of Solaris, the list of affected files can be\nretrieved by using the '-v' option to 'zpool status':\n\n\n# zpool status -xv\n pool: test\n state: ONLINE\nstatus: One or more devices has experienced an error and no valid replicas\n are available. Some filesystem data is corrupt, and applications\n may have been affected.\naction: Destroy the pool and restore from backup.\n see: https://www.illumos.org/msg/ZFS-8000-8A\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test ONLINE 0 0 2\n c0t0d0 ONLINE 0 0 2\n c0t0d1 ONLINE 0 0 0\n\nerrors: Permanent errors have been detected in the following files:\n\n /export/example/foo\n\n\nDamaged files may or may not be able to be removed depending on the\ntype of corruption. If the corruption is within the plain data, the file should\nbe removable. If the corruption is in the file metadata, then the file cannot\nbe removed, though it can be moved to an alternate location. In either case,\nthe data should be restored from a backup source. It is also possible for the\ncorruption to be within pool-wide metadata, resulting in entire datasets being\nunavailable. If this is the case, the only option is to destroy the pool and\nre-create the datasets from backup.\n "
#
# code: ZFS-8000-9P
# keys: ereport.fs.zfs.device.failing
@@ -167,7 +167,7 @@ msgstr "ZFS has attempted to repair the affected data."
msgid "ZFS-8000-9P.impact"
msgstr "The system is unaffected, though errors may indicate future\n failure. Future errors may cause ZFS to automatically fault\n the device."
msgid "ZFS-8000-9P.action"
-msgstr "\nRun 'zpool status -x' to determine which pool has experienced\nerrors:\n\n\n# zpool status\n pool: test\n state: ONLINE\nstatus: One or more devices has experienced an unrecoverable error. An\n attempt was made to correct the error. Applications are unaffected.\naction: Determine if the device needs to be replaced, and clear the errors\n using 'zpool online' or replace the device with 'zpool replace'.\n see: http://www.sun.com/msg/ZFS-8000-9P\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test ONLINE 0 0 0\n mirror ONLINE 0 0 0\n c0t0d0 ONLINE 0 0 2\n c0t0d1 ONLINE 0 0 0\n\nerrors: No known data errors\n\n\nFind the device with a non-zero error count for READ, WRITE, or CKSUM.\nThis indicates that the device has experienced a read I/O error, write I/O\nerror, or checksum validation error. Because the device is part of a mirror or\nRAID-Z device, ZFS was able to recover from the error and subsequently repair\nthe damaged data.\n\nIf these errors persist over a period of time, ZFS may determine the\ndevice is faulty and mark it as such. However, these error counts may or may\nnot indicate that the device is unusable. It depends on how the errors were\ncaused, which the administrator can determine in advance of any ZFS diagnosis.\nFor example, the following cases will all produce errors that do not indicate\npotential device failure:\n\n\nA network attached device lost connectivity but has now\nrecovered\nA device suffered from a bit flip, an expected event over long\nperiods of time\nAn administrator accidentally wrote over a portion of the disk using\nanother program\n\n\nIn these cases, the presence of errors does not indicate that the\ndevice is likely to fail in the future, and therefore does not need to be\nreplaced. If this is the case, then the device errors should be cleared using\n'zpool clear':\n\n\n# zpool clear test c0t0d0\n\n\nOn the other hand, errors may very well indicate that the device has\nfailed or is about to fail. If there are continual I/O errors to a device that\nis otherwise attached and functioning on the system, it most likely needs to be\nreplaced. The administrator should check the system log for any driver\nmessages that may indicate hardware failure. If it is determined that the\ndevice needs to be replaced, then the 'zpool replace' command should be\nused:\n\n\n# zpool replace test c0t0d0 c0t0d2\n\n\nThis will attach the new device to the pool and begin resilvering data\nto it. Once the resilvering process is complete, the old device will\nautomatically be removed from the pool, at which point it can safely be removed\nfrom the system. If the device needs to be replaced in-place (because there are\nno available spare devices), the original device can be removed and replaced\nwith a new device, at which point a different form of 'zpool replace' can be\nused:\n\n\n# zpool replace test c0t0d0\n\n\nThis assumes that the original device at 'c0t0d0' has been replaced\nwith a new device under the same path, and will be replaced\nappropriately.\n\nYou can monitor the progress of the resilvering operation by using the\n'zpool status -x' command:\n\n\n# zpool status -x\n pool: test\n state: DEGRADED\nstatus: One or more devices is currently being replaced. 
The pool may not be\n providing the necessary level of replication.\naction: Wait for the resilvering operation to complete\n scrub: resilver in progress, 0.14% done, 0h0m to go\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test ONLINE 0 0 0\n mirror ONLINE 0 0 0\n replacing ONLINE 0 0 0\n c0t0d0 ONLINE 0 0 3\n c0t0d2 ONLINE 0 0 0 58.5K resilvered\n c0t0d1 ONLINE 0 0 0\n\nerrors: No known data errors\n\n "
+msgstr "\nRun 'zpool status -x' to determine which pool has experienced\nerrors:\n\n\n# zpool status\n pool: test\n state: ONLINE\nstatus: One or more devices has experienced an unrecoverable error. An\n attempt was made to correct the error. Applications are unaffected.\naction: Determine if the device needs to be replaced, and clear the errors\n using 'zpool online' or replace the device with 'zpool replace'.\n see: https://www.illumos.org/msg/ZFS-8000-9P\n scrub: none requested\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test ONLINE 0 0 0\n mirror ONLINE 0 0 0\n c0t0d0 ONLINE 0 0 2\n c0t0d1 ONLINE 0 0 0\n\nerrors: No known data errors\n\n\nFind the device with a non-zero error count for READ, WRITE, or CKSUM.\nThis indicates that the device has experienced a read I/O error, write I/O\nerror, or checksum validation error. Because the device is part of a mirror or\nRAID-Z device, ZFS was able to recover from the error and subsequently repair\nthe damaged data.\n\nIf these errors persist over a period of time, ZFS may determine the\ndevice is faulty and mark it as such. However, these error counts may or may\nnot indicate that the device is unusable. It depends on how the errors were\ncaused, which the administrator can determine in advance of any ZFS diagnosis.\nFor example, the following cases will all produce errors that do not indicate\npotential device failure:\n\n\nA network attached device lost connectivity but has now\nrecovered\nA device suffered from a bit flip, an expected event over long\nperiods of time\nAn administrator accidentally wrote over a portion of the disk using\nanother program\n\n\nIn these cases, the presence of errors does not indicate that the\ndevice is likely to fail in the future, and therefore does not need to be\nreplaced. If this is the case, then the device errors should be cleared using\n'zpool clear':\n\n\n# zpool clear test c0t0d0\n\n\nOn the other hand, errors may very well indicate that the device has\nfailed or is about to fail. If there are continual I/O errors to a device that\nis otherwise attached and functioning on the system, it most likely needs to be\nreplaced. The administrator should check the system log for any driver\nmessages that may indicate hardware failure. If it is determined that the\ndevice needs to be replaced, then the 'zpool replace' command should be\nused:\n\n\n# zpool replace test c0t0d0 c0t0d2\n\n\nThis will attach the new device to the pool and begin resilvering data\nto it. Once the resilvering process is complete, the old device will\nautomatically be removed from the pool, at which point it can safely be removed\nfrom the system. If the device needs to be replaced in-place (because there are\nno available spare devices), the original device can be removed and replaced\nwith a new device, at which point a different form of 'zpool replace' can be\nused:\n\n\n# zpool replace test c0t0d0\n\n\nThis assumes that the original device at 'c0t0d0' has been replaced\nwith a new device under the same path, and will be replaced\nappropriately.\n\nYou can monitor the progress of the resilvering operation by using the\n'zpool status -x' command:\n\n\n# zpool status -x\n pool: test\n state: DEGRADED\nstatus: One or more devices is currently being replaced. 
The pool may not be\n providing the necessary level of replication.\naction: Wait for the resilvering operation to complete\n scrub: resilver in progress, 0.14% done, 0h0m to go\nconfig:\n\n NAME STATE READ WRITE CKSUM\n test ONLINE 0 0 0\n mirror ONLINE 0 0 0\n replacing ONLINE 0 0 0\n c0t0d0 ONLINE 0 0 3\n c0t0d2 ONLINE 0 0 0 58.5K resilvered\n c0t0d1 ONLINE 0 0 0\n\nerrors: No known data errors\n\n "
#
# code: ZFS-8000-A5
# keys: ereport.fs.zfs.device.version_mismatch
@@ -231,7 +231,7 @@ msgstr "No automated response will be taken."
msgid "ZFS-8000-EY.impact"
msgstr "ZFS filesystems are not available"
msgid "ZFS-8000-EY.action"
-msgstr "\n\nThe pool has been written to from another host, and was not cleanly exported\nfrom the other system. Actively importing a pool on multiple systems will\ncorrupt the pool and leave it in an unrecoverable state. To determine which\nsystem last accessed the pool, run the 'zpool import' command:\n\n\n# zpool import\n pool: test\n id: 14702934086626715962\nstate: ONLINE\nstatus: The pool was last accessed by another system.\naction: The pool can be imported using its name or numeric identifier and\n the '-f' flag.\n see: http://www.sun.com/msg/ZFS-8000-EY\nconfig:\n\n test ONLINE\n c0t0d0 ONLINE\n\n# zpool import test\ncannot import 'test': pool may be in use from other system, it was last\naccessed by 'tank' (hostid: 0x1435718c) on Fri Mar 9 15:42:47 2007\nuse '-f' to import anyway\n\n\n\nIf you are certain that the pool is not being actively accessed by another\nsystem, then you can use the '-f' option to 'zpool import' to forcibly\nimport the pool.\n\n "
+msgstr "\n\nThe pool has been written to from another host, and was not cleanly exported\nfrom the other system. Actively importing a pool on multiple systems will\ncorrupt the pool and leave it in an unrecoverable state. To determine which\nsystem last accessed the pool, run the 'zpool import' command:\n\n\n# zpool import\n pool: test\n id: 14702934086626715962\nstate: ONLINE\nstatus: The pool was last accessed by another system.\naction: The pool can be imported using its name or numeric identifier and\n the '-f' flag.\n see: https://www.illumos.org/msg/ZFS-8000-EY\nconfig:\n\n test ONLINE\n c0t0d0 ONLINE\n\n# zpool import test\ncannot import 'test': pool may be in use from other system, it was last\naccessed by 'tank' (hostid: 0x1435718c) on Fri Mar 9 15:42:47 2007\nuse '-f' to import anyway\n\n\n\nIf you are certain that the pool is not being actively accessed by another\nsystem, then you can use the '-f' option to 'zpool import' to forcibly\nimport the pool.\n\n "
#
# code: ZFS-8000-FD
# keys: fault.fs.zfs.vdev.io
diff --git a/usr/src/cmd/fm/fmadm/common/faulty.c b/usr/src/cmd/fm/fmadm/common/faulty.c
index 64327a1..0eeb2e6 100644
--- a/usr/src/cmd/fm/fmadm/common/faulty.c
+++ b/usr/src/cmd/fm/fmadm/common/faulty.c
@@ -100,7 +100,7 @@
*
* Description : The number of errors associated with this memory module has
* exceeded acceptable levels. Refer to
- * http://sun.com/msg/AMD-8000-2F for more information.
+ * https://www.illumos.org/msg/AMD-8000-2F for more information.
*
* Response : Pages of memory associated with this memory module are being
* removed from service as errors are reported.
@@ -125,7 +125,7 @@
* 5ca4aeb3-36...f6be-c2e8166dc484 2 suspects in this FRU total certainty 100%
*
* Description : A problem was detected for a PCI device.
- * Refer to http://sun.com/msg/PCI-8000-7J for more information.
+ * Refer to https://www.illumos.org/msg/PCI-8000-7J for more information.
*
* Response : One or more device instances may be disabled
*
diff --git a/usr/src/cmd/fm/modules/common/syslog-msgs/syslog.c b/usr/src/cmd/fm/modules/common/syslog-msgs/syslog.c
index 30b817b..17b27e6 100644
--- a/usr/src/cmd/fm/modules/common/syslog-msgs/syslog.c
+++ b/usr/src/cmd/fm/modules/common/syslog-msgs/syslog.c
@@ -318,7 +318,7 @@ static const fmd_prop_t fmd_props[] = {
{ "facility", FMD_TYPE_STRING, "LOG_DAEMON" },
{ "gmt", FMD_TYPE_BOOL, "false" },
{ "syslogd", FMD_TYPE_BOOL, "true" },
- { "url", FMD_TYPE_STRING, "http://sun.com/msg/" },
+ { "url", FMD_TYPE_STRING, "https://www.illumos.org/msg/" },
{ "message_all", FMD_TYPE_BOOL, "false" },
{ NULL, 0, NULL }
};
diff --git a/usr/src/cmd/svc/configd/backend.c b/usr/src/cmd/svc/configd/backend.c
index 74b25fc..6d04b86 100644
--- a/usr/src/cmd/svc/configd/backend.c
+++ b/usr/src/cmd/svc/configd/backend.c
@@ -1993,7 +1993,7 @@ integrity_fail:
" /lib/svc/bin/restore_repository\n"
"\n"
" can be run to restore a backup version of your repository. See\n"
-" http://sun.com/msg/SMF-8000-MY for more information.\n"
+" https://www.illumos.org/msg/SMF-8000-MY for more information.\n"
"\n",
db_file,
(fname == NULL)? ":\n\n" : " is in:\n\n ",
diff --git a/usr/src/cmd/svc/configd/restore_repository.sh b/usr/src/cmd/svc/configd/restore_repository.sh
index 8ba38ce..488a306 100644
--- a/usr/src/cmd/svc/configd/restore_repository.sh
+++ b/usr/src/cmd/svc/configd/restore_repository.sh
@@ -37,7 +37,7 @@ usage()
{
echo "usage: $0 [-r rootdir]" >&2
echo "
-See http://sun.com/msg/SMF-8000-MY for more information on the use of
+See https://www.illumos.org/msg/SMF-8000-MY for more information on the use of
this script."
exit 2;
}
@@ -86,7 +86,7 @@ if [ -x /usr/bin/id -a -x /usr/bin/grep ] &&
fi
echo >&2 "
-See http://sun.com/msg/SMF-8000-MY for more information on the use of
+See https://www.illumos.org/msg/SMF-8000-MY for more information on the use of
this script to restore backup copies of the smf(5) repository.
If there are any problems which need human intervention, this script will
diff --git a/usr/src/cmd/svc/milestone/README.share b/usr/src/cmd/svc/milestone/README.share
index 4cb2316..fc5748b 100644
--- a/usr/src/cmd/svc/milestone/README.share
+++ b/usr/src/cmd/svc/milestone/README.share
@@ -145,5 +145,5 @@ route(1M) command. On typical IPv4 networks, this invocation would be
--
(An extended version of this document is available at
-http://sun.com/msg/SMF-8000-QD. That version includes additional
+https://www.illumos.org/msg/SMF-8000-QD. That version includes additional
document references.)
diff --git a/usr/src/cmd/svc/svcs/explain.c b/usr/src/cmd/svc/svcs/explain.c
index 84f5e8f..e991263 100644
--- a/usr/src/cmd/svc/svcs/explain.c
+++ b/usr/src/cmd/svc/svcs/explain.c
@@ -193,7 +193,7 @@ static scf_value_t *g_val;
static scf_iter_t *g_iter, *g_viter;
static char *g_fmri, *g_value;
static size_t g_fmri_sz, g_value_sz;
-static const char *g_msgbase = "http://sun.com/msg/";
+static const char *g_msgbase = "https://www.illumos.org/msg/";
static char *emsg_nomem;
static char *emsg_invalid_dep;
diff --git a/usr/src/cmd/zpool/zpool_main.c b/usr/src/cmd/zpool/zpool_main.c
index 1cb2e63..c9ac032 100644
--- a/usr/src/cmd/zpool/zpool_main.c
+++ b/usr/src/cmd/zpool/zpool_main.c
@@ -1475,7 +1475,7 @@ show_import(nvlist_t *config)
}
if (msgid != NULL)
- (void) printf(gettext(" see: http://www.sun.com/msg/%s\n"),
+ (void) printf(gettext(" see: https://www.illumos.org/msg/%s\n"),
msgid);
(void) printf(gettext("config:\n\n"));
@@ -3496,7 +3496,7 @@ print_dedup_stats(nvlist_t *config)
* pool: tank
* status: DEGRADED
* reason: One or more devices ...
- * see: http://www.sun.com/msg/ZFS-xxxx-01
+ * see: https://www.illumos.org/msg/ZFS-xxxx-01
* config:
* mirror DEGRADED
* c1t0d0 OK
@@ -3704,7 +3704,7 @@ status_callback(zpool_handle_t *zhp, void *data)
}
if (msgid != NULL)
- (void) printf(gettext(" see: http://www.sun.com/msg/%s\n"),
+ (void) printf(gettext(" see: https://www.illumos.org/msg/%s\n"),
msgid);
if (config != NULL) {
diff --git a/usr/src/lib/fm/libfmd_adm/common/fmd_adm.c b/usr/src/lib/fm/libfmd_adm/common/fmd_adm.c
index 0d0a294..383308a 100644
--- a/usr/src/lib/fm/libfmd_adm/common/fmd_adm.c
+++ b/usr/src/lib/fm/libfmd_adm/common/fmd_adm.c
@@ -37,7 +37,7 @@
#include <fmd_rpc_adm.h>
static const uint_t _fmd_adm_bufsize = 128 * 1024;
-static const char _url_fallback[] = "http://sun.com/msg/";
+static const char _url_fallback[] = "https://www.illumos.org/msg/";
fmd_adm_t *
fmd_adm_open(const char *host, uint32_t prog, int version)
diff --git a/usr/src/lib/fm/libfmd_msg/common/fmd_msg.c b/usr/src/lib/fm/libfmd_msg/common/fmd_msg.c
index 5735942..bb19f56 100644
--- a/usr/src/lib/fm/libfmd_msg/common/fmd_msg.c
+++ b/usr/src/lib/fm/libfmd_msg/common/fmd_msg.c
@@ -67,7 +67,7 @@
* permitted to contain variable expansions, currently defined as follows:
*
* %% - literal % character
- * %s - knowledge article URL (e.g. http://sun.com/msg/<MSG-ID>)
+ * %s - knowledge article URL (e.g. https://www.illumos.org/msg/<MSG-ID>)
* %< x > - value x from the current event, using the expression syntax below:
*
* foo.bar => print nvlist_t member "bar" contained within nvlist_t "foo"
@@ -145,7 +145,7 @@ static pthread_rwlock_t fmd_msg_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static const char FMD_MSG_DOMAIN[] = "FMD";
static const char FMD_MSG_TEMPLATE[] = "syslog-msgs-message-template";
static const char FMD_MSG_URLKEY[] = "syslog-url";
-static const char FMD_MSG_URLBASE[] = "http://sun.com/msg/";
+static const char FMD_MSG_URLBASE[] = "https://www.illumos.org/msg/";
static const char FMD_MSG_NLSPATH[] = "NLSPATH=/usr/lib/fm/fmd/fmd.cat";
static const char FMD_MSG_MISSING[] = "-";
diff --git a/usr/src/lib/fm/libfmd_msg/common/fmd_msg_test.out b/usr/src/lib/fm/libfmd_msg/common/fmd_msg_test.out
index 4ce3f79..4139f76 100644
--- a/usr/src/lib/fm/libfmd_msg/common/fmd_msg_test.out
+++ b/usr/src/lib/fm/libfmd_msg/common/fmd_msg_test.out
@@ -24,32 +24,32 @@
#
code TEST-8000-08 item 0 = <<Defect>>
code TEST-8000-08 item 1 = <<Minor>>
-code TEST-8000-08 item 2 = <<This entry tests URL variable expansion - url = http://sun.com/msg/TEST-8000-08>>
+code TEST-8000-08 item 2 = <<This entry tests URL variable expansion - url = https://www.illumos.org/msg/TEST-8000-08>>
code TEST-8000-08 item 3 = <<This entry tests the percent character escape sequence: %>>
code TEST-8000-08 item 4 = <<This entry tests variable expansion for event payload members: uuid = %<uuid>, de scheme = %<de.scheme>>>
code TEST-8000-08 item 5 = <<Variable expansion for arrays: index = %<test_arr[1].index>>>
-code TEST-8000-08 item 6 = <<http://sun.com/msg/TEST-8000-08>>
+code TEST-8000-08 item 6 = <<https://www.illumos.org/msg/TEST-8000-08>>
SUNW-MSG-ID: TEST-8000-08, TYPE: Defect, VER: 1, SEVERITY: Minor
PLATFORM: -, CSN: -, HOSTNAME: -
SOURCE: -, REV: -
EVENT-ID: -
-DESC: This entry tests URL variable expansion - url = http://sun.com/msg/TEST-8000-08
+DESC: This entry tests URL variable expansion - url = https://www.illumos.org/msg/TEST-8000-08
AUTO-RESPONSE: This entry tests the percent character escape sequence: %
IMPACT: This entry tests variable expansion for event payload members: uuid = %<uuid>, de scheme = %<de.scheme>
REC-ACTION: Variable expansion for arrays: index = %<test_arr[1].index>
code TEST-8000-08 item 0 = <<Defect>>
code TEST-8000-08 item 1 = <<Minor>>
-code TEST-8000-08 item 2 = <<This entry tests URL variable expansion - url = http://sun.com/msg/TEST-8000-08>>
+code TEST-8000-08 item 2 = <<This entry tests URL variable expansion - url = https://www.illumos.org/msg/TEST-8000-08>>
code TEST-8000-08 item 3 = <<This entry tests the percent character escape sequence: %>>
code TEST-8000-08 item 4 = <<This entry tests variable expansion for event payload members: uuid = 12345678, de scheme = fmd>>
code TEST-8000-08 item 5 = <<Variable expansion for arrays: index = 1>>
-code TEST-8000-08 item 6 = <<http://sun.com/msg/TEST-8000-08>>
+code TEST-8000-08 item 6 = <<https://www.illumos.org/msg/TEST-8000-08>>
SUNW-MSG-ID: TEST-8000-08, TYPE: Defect, VER: 1, SEVERITY: Minor
PLATFORM: product, CSN: product_sn, HOSTNAME: server
SOURCE: fmd_msg_test, REV: 1.0
EVENT-ID: 12345678
-DESC: This entry tests URL variable expansion - url = http://sun.com/msg/TEST-8000-08
+DESC: This entry tests URL variable expansion - url = https://www.illumos.org/msg/TEST-8000-08
AUTO-RESPONSE: This entry tests the percent character escape sequence: %
IMPACT: This entry tests variable expansion for event payload members: uuid = 12345678, de scheme = fmd
REC-ACTION: Variable expansion for arrays: index = 1
@@ -65,32 +65,32 @@ REC-ACTION: Variable expansion for arrays: index = 1
code TEST-8000-08 item 0 = <<Defect>>
code TEST-8000-08 item 1 = <<Minor>>
-code TEST-8000-08 item 2 = <<This entry tests URL variable expansion - url = http://sun.com/msg/TEST-8000-08>>
+code TEST-8000-08 item 2 = <<This entry tests URL variable expansion - url = https://www.illumos.org/msg/TEST-8000-08>>
code TEST-8000-08 item 3 = <<This entry tests the percent character escape sequence: %>>
code TEST-8000-08 item 4 = <<This entry tests variable expansion for event payload members: uuid = %<uuid>, de scheme = %<de.scheme>>>
code TEST-8000-08 item 5 = <<Variable expansion for arrays: index = %<test_arr[1].index>>>
-code TEST-8000-08 item 6 = <<http://sun.com/msg/TEST-8000-08>>
+code TEST-8000-08 item 6 = <<https://www.illumos.org/msg/TEST-8000-08>>
SUNW-MSG-ID: TEST-8000-08, TYPE: Defect, VER: 1, SEVERITY: Minor
PLATFORM: -, CSN: -, HOSTNAME: -
SOURCE: -, REV: -
EVENT-ID: -
-DESC: This entry tests URL variable expansion - url = http://sun.com/msg/TEST-8000-08
+DESC: This entry tests URL variable expansion - url = https://www.illumos.org/msg/TEST-8000-08
AUTO-RESPONSE: This entry tests the percent character escape sequence: %
IMPACT: This entry tests variable expansion for event payload members: uuid = %<uuid>, de scheme = %<de.scheme>
REC-ACTION: Variable expansion for arrays: index = %<test_arr[1].index>
code TEST-8000-08 item 0 = <<Defect>>
code TEST-8000-08 item 1 = <<Minor>>
-code TEST-8000-08 item 2 = <<This entry tests URL variable expansion - url = http://sun.com/msg/TEST-8000-08>>
+code TEST-8000-08 item 2 = <<This entry tests URL variable expansion - url = https://www.illumos.org/msg/TEST-8000-08>>
code TEST-8000-08 item 3 = <<This entry tests the percent character escape sequence: %>>
code TEST-8000-08 item 4 = <<This entry tests variable expansion for event payload members: uuid = 12345678, de scheme = fmd>>
code TEST-8000-08 item 5 = <<Variable expansion for arrays: index = 1>>
-code TEST-8000-08 item 6 = <<http://sun.com/msg/TEST-8000-08>>
+code TEST-8000-08 item 6 = <<https://www.illumos.org/msg/TEST-8000-08>>
SUNW-MSG-ID: TEST-8000-08, TYPE: Defect, VER: 1, SEVERITY: Minor
PLATFORM: product, CSN: product_sn, HOSTNAME: server
SOURCE: fmd_msg_test, REV: 1.0
EVENT-ID: 12345678
-DESC: This entry tests URL variable expansion - url = http://sun.com/msg/TEST-8000-08
+DESC: This entry tests URL variable expansion - url = https://www.illumos.org/msg/TEST-8000-08
AUTO-RESPONSE: This entry tests the percent character escape sequence: %
IMPACT: This entry tests variable expansion for event payload members: uuid = 12345678, de scheme = fmd
REC-ACTION: Variable expansion for arrays: index = 1
diff --git a/usr/src/lib/fm/libfmd_snmp/mibs/SUN-FM-MIB.mib b/usr/src/lib/fm/libfmd_snmp/mibs/SUN-FM-MIB.mib
index b093cbc..7a70943 100644
--- a/usr/src/lib/fm/libfmd_snmp/mibs/SUN-FM-MIB.mib
+++ b/usr/src/lib/fm/libfmd_snmp/mibs/SUN-FM-MIB.mib
@@ -166,7 +166,7 @@ sunFmProblemCode OBJECT-TYPE
DESCRIPTION
"The SUNW-MSG-ID static message identifier for this class of
problem, as recorded by fmd(1M) and shown by fmdump(1M). The
- message identifier can be used as a key at http://sun.com/msg/"
+ message identifier can be used as a key at https://www.illumos.org/msg/"
::= { sunFmProblemEntry 3 }
sunFmProblemURL OBJECT-TYPE
diff --git a/usr/src/man/man1/svcs.1 b/usr/src/man/man1/svcs.1
index 807722e..ceb234b 100644
--- a/usr/src/man/man1/svcs.1
+++ b/usr/src/man/man1/svcs.1
@@ -720,7 +720,7 @@ example% svcs -x
svc:/application/print/server:default (LP print server)
State: disabled since Mon Feb 13 17:56:21 2006
Reason: Disabled by an administrator.
- See: http://sun.com/msg/SMF-8000-05
+ See: https://www.illumos.org/msg/SMF-8000-05
See: lpsched(1M)
Impact: 2 dependent services are not running. (Use -v for list.)
.fi
@@ -755,7 +755,7 @@ svc:/network/nfs/client:default (NFS client)
State: offline since Mon Feb 27 16:03:23 2006
Reason: Service svc:/network/nfs/status:default
is not running because a method failed repeatedly.
- See: http://sun.com/msg/SMF-8000-GE
+ See: https://www.illumos.org/msg/SMF-8000-GE
Path: svc:/network/nfs/client:default
svc:/network/nfs/nlockmgr:default
svc:/network/nfs/status:default
diff --git a/usr/src/man/man1m/fmadm.1m b/usr/src/man/man1m/fmadm.1m
index 88a71c3..e44a29b 100644
--- a/usr/src/man/man1m/fmadm.1m
+++ b/usr/src/man/man1m/fmadm.1m
@@ -49,7 +49,7 @@ The Fault Manager attempts to automate as many activities as possible, so use
of \fBfmadm\fR is typically not required. When the Fault Manager needs help
from a human administrator, service repair technician, or Sun, it produces a
message indicating its needs. It also refers you to a knowledge article on
-Sun's web site, http://www.sun.com/msg/. The web site might ask you to use
+Sun's web site, https://www.illumos.org/msg/. The web site might ask you to use
\fBfmadm\fR or one of the other fault management utilities to gather more
information or perform additional tasks. The documentation for \fBfmd\fR(1M),
\fBfmdump\fR(1M), and \fBfmstat\fR(1M) describe more about tools to observe
@@ -482,4 +482,4 @@ not-an-interface.
\fI\fR
.sp
.LP
-http://www.sun.com/msg/
+https://www.illumos.org/msg/
diff --git a/usr/src/man/man1m/fmd.1m b/usr/src/man/man1m/fmd.1m
index 2406727..fc2f2b3 100644
--- a/usr/src/man/man1m/fmd.1m
+++ b/usr/src/man/man1m/fmd.1m
@@ -22,7 +22,7 @@ activities such as disabling faulty components. When appropriate, the fault
manager also sends a message to the \fBsyslogd\fR(1M) service to notify an
administrator that a problem has been detected. The message directs
administrators to a knowledge article on Sun's web site,
-http://www.sun.com/msg/, which explains more about the problem impact and
+https://www.illumos.org/msg/, which explains more about the problem impact and
appropriate responses.
.sp
.LP
@@ -164,7 +164,7 @@ Interface Stability Evolving
\fBsyslogd\fR(1M), \fBattributes\fR(5), \fBsmf\fR(5)
.sp
.LP
-http://www.sun.com/msg/
+https://www.illumos.org/msg/
.SH NOTES
.sp
.LP
diff --git a/usr/src/man/man1m/fmdump.1m b/usr/src/man/man1m/fmdump.1m
index 0cfdd63..1e19e5a 100644
--- a/usr/src/man/man1m/fmdump.1m
+++ b/usr/src/man/man1m/fmdump.1m
@@ -88,7 +88,7 @@ particular problem across any set of systems
.ie t \(bu
.el o
A message identifier that can be used to access a corresponding knowledge
-article located at Sun's web site, http://www.sun.com/msg/
+article located at Sun's web site, https://www.illumos.org/msg/
.RE
.sp
.LP
@@ -96,7 +96,7 @@ If a problem requires action by a human administrator or service technician or
affects system behavior, the Fault Manager also issues a human-readable message
to \fBsyslogd\fR(1M). This message provides a summary of the problem and a
reference to the knowledge article on the Sun web site,
-http://www.sun.com/msg/.
+https://www.illumos.org/msg/.
.sp
.LP
You can use the \fB-v\fR and \fB-V\fR options to expand the display from a
@@ -138,7 +138,7 @@ Select events that match the specified class. The class argument can use the
glob pattern matching syntax described in \fBsh\fR(1). The class represents a
hierarchical classification string indicating the type of telemetry event. More
information about Sun's telemetry protocol is available at Sun's web site,
-http://www.sun.com/msg/.
+https://www.illumos.org/msg/.
.RE
.sp
@@ -651,7 +651,7 @@ Private. The human-readable fault log output is Evolving.
\fI\fR
.sp
.LP
-http://www.sun.com/msg/
+https://www.illumos.org/msg/
.SH NOTES
.sp
.LP
diff --git a/usr/src/uts/common/fs/zfs/spa.c b/usr/src/uts/common/fs/zfs/spa.c
index 5d9332a..87d6342 100644
--- a/usr/src/uts/common/fs/zfs/spa.c
+++ b/usr/src/uts/common/fs/zfs/spa.c
@@ -1952,7 +1952,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%lx). "
- "See: http://www.sun.com/msg/ZFS-8000-EY",
+ "See: https://www.illumos.org/msg/ZFS-8000-EY",
spa_name(spa), hostname,
(unsigned long)hostid);
return (EBADF);
diff --git a/usr/src/uts/common/os/fm.c b/usr/src/uts/common/os/fm.c
index 4efcff4..bf1c8d2 100644
--- a/usr/src/uts/common/os/fm.c
+++ b/usr/src/uts/common/os/fm.c
@@ -79,7 +79,7 @@
* URL and SUNW-MSG-ID value to display for fm_panic(), defined below. These
* values must be kept in sync with the FMA source code in usr/src/cmd/fm.
*/
-static const char *fm_url = "http://www.sun.com/msg";
+static const char *fm_url = "https://www.illumos.org/msg";
static const char *fm_msgid = "SUNOS-8000-0G";
static char *volatile fm_panicstr = NULL;
diff --git a/usr/src/uts/i86pc/dboot/dboot_startkern.c b/usr/src/uts/i86pc/dboot/dboot_startkern.c
index 2438dc6..ccf1f5c 100644
--- a/usr/src/uts/i86pc/dboot/dboot_startkern.c
+++ b/usr/src/uts/i86pc/dboot/dboot_startkern.c
@@ -1114,7 +1114,7 @@ build_page_tables(void)
The grub entry should be changed to:\n\
kernel$ /platform/i86pc/kernel/$ISADIR/unix\n\
module$ /platform/i86pc/$ISADIR/boot_archive\n\
-See http://www.sun.com/msg/SUNOS-8000-AK for details.\n"
+See https://www.illumos.org/msg/SUNOS-8000-AK for details.\n"
/*
* startup_kernel has a pretty simple job. It builds pagetables which reflect
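
The patch above was produced by the following ksh script, which rewrites each sun.com/msg knowledge-article URL in place:
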
#!/bin/ksh
WS="/ws/illumos-git"
# this list generated with:
# git grep -l sun.com.msg | cat -
#
FILE_LIST="
usr/src/cmd/boot/bootadm/bootadm_upgrade.c
usr/src/cmd/fm/dicts/ZFS.po
usr/src/cmd/fm/fmadm/common/faulty.c
usr/src/cmd/fm/modules/common/syslog-msgs/syslog.c
usr/src/cmd/svc/configd/backend.c
usr/src/cmd/svc/configd/restore_repository.sh
usr/src/cmd/svc/milestone/README.share
usr/src/cmd/svc/svcs/explain.c
usr/src/cmd/zpool/zpool_main.c
usr/src/lib/fm/libfmd_adm/common/fmd_adm.c
usr/src/lib/fm/libfmd_msg/common/fmd_msg.c
usr/src/lib/fm/libfmd_msg/common/fmd_msg_test.out
usr/src/lib/fm/libfmd_snmp/mibs/SUN-FM-MIB.mib
usr/src/man/man1/svcs.1
usr/src/man/man1m/fmadm.1m
usr/src/man/man1m/fmd.1m
usr/src/man/man1m/fmdump.1m
usr/src/uts/common/fs/zfs/spa.c
usr/src/uts/common/os/fm.c
usr/src/uts/i86pc/dboot/dboot_startkern.c
"
for fl in $FILE_LIST; do
echo "${WS}/${fl}"
sed -E -i '' \
-e 's,https*://(www.)*sun.com/msg,https://www.illumos.org/msg,g' \
"${WS}/${fl}"
done
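
A quick sanity check after the run is worth doing; the sketch below is not part of the original gist and assumes the same $WS workspace:

# Hypothetical verification pass: list any sun.com/msg references that survived.
cd "${WS}" || exit 1
if git grep -n 'sun\.com/msg' -- usr/src; then
        echo "leftover sun.com/msg references found" >&2
else
        echo "no sun.com/msg references remain"
fi
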
lackoma commented Dec 16, 2017

Please help me recover my data (message: ZFS-8000-5E). I would be willing to pay if the recovery succeeds.
