Restructure into package format

This commit is contained in:
Mykola Grymalyuk
2024-03-31 21:27:36 -06:00
parent d02d89b9c0
commit 463bed4e06
81 changed files with 881 additions and 464 deletions

View File

@@ -0,0 +1,989 @@
"""
sys_patch.py: Framework for mounting and patching macOS root volume
"""
"""
System based off of Apple's Kernel Debug Kit (KDK)
- https://developer.apple.com/download/all/
The system relies on mounting the APFS volume as a live read/write volume
We perform our required edits, then create a new snapshot for the system boot
The manual process is as follows:
1. Find the Root Volume
'diskutil info / | grep "Device Node:"'
2. Convert Snapshot Device Node to Root Volume Device Node
/dev/disk3s1s1 -> /dev/disk3s1 (strip last 's1')
3. Mount the APFS volume as a read/write volume
'sudo mount -o nobrowse -t apfs /dev/disk5s5 /System/Volumes/Update/mnt1'
4. Perform edits to the system (ie. create new KernelCollection)
'sudo kmutil install --volume-root /System/Volumes/Update/mnt1/ --update-all'
5. Create a new snapshot for the system boot
'sudo bless --folder /System/Volumes/Update/mnt1/System/Library/CoreServices --bootefi --create-snapshot'
Additionally Apple's APFS snapshot system supports system rollbacks:
'sudo bless --mount /System/Volumes/Update/mnt1 --bootefi --last-sealed-snapshot'
Note: root volume rollbacks are unstable in Big Sur due to quickly discarding the original snapshot
- Generally within 2~ boots, the original snapshot is discarded
- Monterey always preserves the original snapshot allowing for reliable rollbacks
Alternative to mounting via 'mount', Apple's update system uses 'mount_apfs' directly
'/sbin/mount_apfs -R /dev/disk5s5 /System/Volumes/Update/mnt1'
With macOS Ventura, you will also need to install the KDK onto root if you plan to use kmutil
This is because Apple removed on-disk binaries (ref: https://github.com/dortania/OpenCore-Legacy-Patcher/issues/998)
'sudo ditto /Library/Developer/KDKs/<KDK Version>/System /System/Volumes/Update/mnt1/System'
"""
import logging
import plistlib
import subprocess
import applescript
from pathlib import Path
from datetime import datetime
from .. import constants
from ..utilities import utilities, kdk_handler
from ..sys_patch import sys_patch_detect, sys_patch_auto, sys_patch_helpers, sys_patch_generate
from ..datasets import os_data
class PatchSysVolume:
    def __init__(self, model: str, global_constants: constants.Constants, hardware_details: list = None) -> None:
        """
        Initialize the root volume patcher.

        Parameters:
            model (str): Machine model identifier (ie. 'MacBookPro11,1')
            global_constants (constants.Constants): Shared application constants/state
            hardware_details (list): Pre-detected patch set from DetectRootPatch.detect_patch_set();
                                     detected here when not supplied (TUI path)
        """
        self.model = model
        self.constants: constants.Constants = global_constants
        self.computer = self.constants.computer
        # Resolved lazily by _mount_root_vol()
        self.root_mount_path = None
        self.root_supports_snapshot = utilities.check_if_root_is_apfs_snapshot()
        self.constants.root_patcher_succeeded = False # Reset Variable each time we start
        self.constants.needs_to_open_preferences = False
        self.patch_set_dictionary = {}
        self.needs_kmutil_exemptions = False # For '/Library/Extensions' rebuilds
        # Set by _merge_kdk_with_root() once a KDK is located
        self.kdk_path = None

        # GUI will detect hardware patches before starting PatchSysVolume()
        # However the TUI will not, so allow for data to be passed in manually avoiding multiple calls
        if hardware_details is None:
            hardware_details = sys_patch_detect.DetectRootPatch(self.computer.real_model, self.constants).detect_patch_set()
        self.hardware_details = hardware_details
        self._init_pathing(custom_root_mount_path=None, custom_data_mount_path=None)

        # When the hardware only needs the Auxiliary KC, no root kmutil/KDK work is required
        self.skip_root_kmutil_requirement = self.hardware_details["Settings: Supports Auxiliary Cache"]
def _init_pathing(self, custom_root_mount_path: Path = None, custom_data_mount_path: Path = None) -> None:
"""
Initializes the pathing for root volume patching
Parameters:
custom_root_mount_path (Path): Custom path to mount the root volume
custom_data_mount_path (Path): Custom path to mount the data volume
"""
if custom_root_mount_path and custom_data_mount_path:
self.mount_location = custom_root_mount_path
self.data_mount_location = custom_data_mount_path
elif self.root_supports_snapshot is True:
# Big Sur and newer use APFS snapshots
self.mount_location = "/System/Volumes/Update/mnt1"
self.mount_location_data = ""
else:
self.mount_location = ""
self.mount_location_data = ""
self.mount_extensions = f"{self.mount_location}/System/Library/Extensions"
self.mount_application_support = f"{self.mount_location_data}/Library/Application Support"
    def _mount_root_vol(self) -> bool:
        """
        Attempts to mount the booted APFS volume as a writable volume
        at /System/Volumes/Update/mnt1

        Manual invocation:
            'sudo mount -o nobrowse -t apfs /dev/diskXsY /System/Volumes/Update/mnt1'

        Returns:
            bool: True if successful, False if not
        """
        # Returns boolean if Root Volume is available
        self.root_mount_path = utilities.get_disk_path()
        if self.root_mount_path.startswith("disk"):
            logging.info(f"- Found Root Volume at: {self.root_mount_path}")
            # A previous run may have left the volume mounted; detect via the
            # Extensions directory under the mount point
            if Path(self.mount_extensions).exists():
                logging.info("- Root Volume is already mounted")
                return True
            else:
                if self.root_supports_snapshot is True:
                    logging.info("- Mounting APFS Snapshot as writable")
                    result = utilities.elevated(["mount", "-o", "nobrowse", "-t", "apfs", f"/dev/{self.root_mount_path}", self.mount_location], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    if result.returncode == 0:
                        logging.info(f"- Mounted APFS Snapshot as writable at: {self.mount_location}")
                        # Double-check the mount actually stuck before declaring success
                        if Path(self.mount_extensions).exists():
                            logging.info("- Successfully mounted the Root Volume")
                            return True
                        else:
                            logging.info("- Root Volume appears to have unmounted unexpectedly")
                    else:
                        logging.info("- Unable to mount APFS Snapshot as writable")
                        logging.info("Reason for mount failure:")
                        logging.info(result.stdout.decode().strip())
        # NOTE(review): non-snapshot systems (root_supports_snapshot False) that are not
        # already mounted fall through to False here - confirm that is the intended contract
        return False
def _merge_kdk_with_root(self, save_hid_cs=False) -> None:
"""
Merge Kernel Debug Kit (KDK) with the root volume
If no KDK is present, will call kdk_handler to download and install it
Parameters:
save_hid_cs (bool): If True, will save the HID CS file before merging KDK
Required for USB 1.1 downgrades on Ventura and newer
"""
if self.skip_root_kmutil_requirement is True:
return
if self.constants.detected_os < os_data.os_data.ventura:
return
if self.constants.kdk_download_path.exists():
if kdk_handler.KernelDebugKitUtilities().install_kdk_dmg(self.constants.kdk_download_path) is False:
logging.info("Failed to install KDK")
raise Exception("Failed to install KDK")
kdk_obj = kdk_handler.KernelDebugKitObject(self.constants, self.constants.detected_os_build, self.constants.detected_os_version)
if kdk_obj.success is False:
logging.info(f"Unable to get KDK info: {kdk_obj.error_msg}")
raise Exception(f"Unable to get KDK info: {kdk_obj.error_msg}")
if kdk_obj.kdk_already_installed is False:
kdk_download_obj = kdk_obj.retrieve_download()
if not kdk_download_obj:
logging.info(f"Could not retrieve KDK: {kdk_obj.error_msg}")
# Hold thread until download is complete
kdk_download_obj.download(spawn_thread=False)
if kdk_download_obj.download_complete is False:
error_msg = kdk_download_obj.error_msg
logging.info(f"Could not download KDK: {error_msg}")
raise Exception(f"Could not download KDK: {error_msg}")
if kdk_obj.validate_kdk_checksum() is False:
logging.info(f"KDK checksum validation failed: {kdk_obj.error_msg}")
raise Exception(f"KDK checksum validation failed: {kdk_obj.error_msg}")
kdk_handler.KernelDebugKitUtilities().install_kdk_dmg(self.constants.kdk_download_path)
# re-init kdk_obj to get the new kdk_installed_path
kdk_obj = kdk_handler.KernelDebugKitObject(self.constants, self.constants.detected_os_build, self.constants.detected_os_version)
if kdk_obj.success is False:
logging.info(f"Unable to get KDK info: {kdk_obj.error_msg}")
raise Exception(f"Unable to get KDK info: {kdk_obj.error_msg}")
if kdk_obj.kdk_already_installed is False:
# We shouldn't get here, but just in case
logging.warning(f"KDK was not installed, but should have been: {kdk_obj.error_msg}")
raise Exception("KDK was not installed, but should have been: {kdk_obj.error_msg}")
kdk_path = Path(kdk_obj.kdk_installed_path) if kdk_obj.kdk_installed_path != "" else None
oclp_plist = Path("/System/Library/CoreServices/OpenCore-Legacy-Patcher.plist")
if (Path(self.mount_location) / Path("System/Library/Extensions/System.kext/PlugIns/Libkern.kext/Libkern")).exists() and oclp_plist.exists():
# KDK was already merged, check if the KDK used is the same as the one we're using
# If not, we'll rsync over with the new KDK
try:
oclp_plist_data = plistlib.load(open(oclp_plist, "rb"))
if "Kernel Debug Kit Used" in oclp_plist_data:
if oclp_plist_data["Kernel Debug Kit Used"] == str(kdk_path):
logging.info("- Matching KDK determined to already be merged, skipping")
return
except:
pass
if kdk_path is None:
logging.info(f"- Unable to find Kernel Debug Kit")
raise Exception("Unable to find Kernel Debug Kit")
self.kdk_path = kdk_path
logging.info(f"- Found KDK at: {kdk_path}")
# Due to some IOHIDFamily oddities, we need to ensure their CodeSignature is retained
cs_path = Path(self.mount_location) / Path("System/Library/Extensions/IOHIDFamily.kext/Contents/PlugIns/IOHIDEventDriver.kext/Contents/_CodeSignature")
if save_hid_cs is True and cs_path.exists():
logging.info("- Backing up IOHIDEventDriver CodeSignature")
# Note it's a folder, not a file
utilities.elevated(["/bin/cp", "-r", cs_path, f"{self.constants.payload_path}/IOHIDEventDriver_CodeSignature.bak"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
logging.info(f"- Merging KDK with Root Volume: {kdk_path.name}")
utilities.elevated(
# Only merge '/System/Library/Extensions'
# 'Kernels' and 'KernelSupport' is wasted space for root patching (we don't care above dev kernels)
["rsync", "-r", "-i", "-a", f"{kdk_path}/System/Library/Extensions/", f"{self.mount_location}/System/Library/Extensions"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
# During reversing, we found that kmutil uses this path to determine whether the KDK was successfully merged
# Best to verify now before we cause any damage
if not (Path(self.mount_location) / Path("System/Library/Extensions/System.kext/PlugIns/Libkern.kext/Libkern")).exists():
logging.info("- Failed to merge KDK with Root Volume")
raise Exception("Failed to merge KDK with Root Volume")
logging.info("- Successfully merged KDK with Root Volume")
# Restore IOHIDEventDriver CodeSignature
if save_hid_cs is True and Path(f"{self.constants.payload_path}/IOHIDEventDriver_CodeSignature.bak").exists():
logging.info("- Restoring IOHIDEventDriver CodeSignature")
if not cs_path.exists():
logging.info(" - CodeSignature folder missing, creating")
utilities.elevated(["/bin/mkdir", "-p", cs_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
utilities.elevated(["/bin/cp", "-r", f"{self.constants.payload_path}/IOHIDEventDriver_CodeSignature.bak", cs_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
utilities.elevated(["/bin/rm", "-rf", f"{self.constants.payload_path}/IOHIDEventDriver_CodeSignature.bak"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def _unpatch_root_vol(self):
"""
Reverts APFS snapshot and cleans up any changes made to the root and data volume
"""
if self.constants.detected_os <= os_data.os_data.big_sur or self.root_supports_snapshot is False:
logging.info("- OS version does not support snapshotting, skipping revert")
logging.info("- Reverting to last signed APFS snapshot")
result = utilities.elevated(["bless", "--mount", self.mount_location, "--bootefi", "--last-sealed-snapshot"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if result.returncode != 0:
logging.info("- Unable to revert root volume patches")
logging.info("Reason for unpatch Failure:")
logging.info(result.stdout.decode())
logging.info("- Failed to revert snapshot via Apple's 'bless' command")
else:
self._clean_skylight_plugins()
self._delete_nonmetal_enforcement()
self._clean_auxiliary_kc()
self.constants.root_patcher_succeeded = True
logging.info("- Unpatching complete")
logging.info("\nPlease reboot the machine for patches to take effect")
def _rebuild_root_volume(self) -> bool:
"""
Rebuilds the Root Volume:
- Rebuilds the Kernel Collection
- Updates the Preboot Kernel Cache
- Rebuilds the dyld Shared Cache
- Creates a new APFS Snapshot
Returns:
bool: True if successful, False if not
"""
if self._rebuild_kernel_collection() is True:
self._update_preboot_kernel_cache()
self._rebuild_dyld_shared_cache()
if self._create_new_apfs_snapshot() is True:
logging.info("- Patching complete")
logging.info("\nPlease reboot the machine for patches to take effect")
if self.needs_kmutil_exemptions is True:
logging.info("Note: Apple will require you to open System Preferences -> Security to allow the new kernel extensions to be loaded")
self.constants.root_patcher_succeeded = True
return True
return False
    def _rebuild_kernel_collection(self) -> bool:
        """
        Rebuilds the Kernel Collection

        Supports following KC generation:
        - Boot/SysKC (11.0+)
        - AuxKC (11.0+)
        - PrelinkedKernel (10.15-)

        Returns:
            bool: True if successful, False if not
        """
        logging.info("- Rebuilding Kernel Cache (This may take some time)")
        if self.constants.detected_os > os_data.os_data.catalina:
            # Base Arguments
            args = ["kmutil", "install"]

            if self.skip_root_kmutil_requirement is True:
                # Only rebuild the Auxiliary Kernel Collection
                args.append("--new")
                args.append("aux")
                args.append("--boot-path")
                args.append(f"{self.mount_location}/System/Library/KernelCollections/BootKernelExtensions.kc")
                args.append("--system-path")
                args.append(f"{self.mount_location}/System/Library/KernelCollections/SystemKernelExtensions.kc")
            else:
                # Rebuild Boot, System and Auxiliary Kernel Collections
                args.append("--volume-root")
                args.append(self.mount_location)
                # Build Boot, Sys and Aux KC
                args.append("--update-all")
                # If multiple kernels found, only build release KCs
                args.append("--variant-suffix")
                args.append("release")

                if self.constants.detected_os >= os_data.os_data.ventura:
                    # With Ventura, we're required to provide a KDK in some form
                    # to rebuild the Kernel Cache
                    #
                    # However since we already merged the KDK onto root with 'ditto',
                    # We can add '--allow-missing-kdk' to skip parsing the KDK
                    #
                    # This allows us to only delete/overwrite kexts inside of
                    # /System/Library/Extensions and not the entire KDK
                    args.append("--allow-missing-kdk")

                    # 'install' and '--update-all' cannot be used together in Ventura.
                    # kmutil will request the usage of 'create' instead:
                    #   Warning: kmutil install's usage of --update-all is deprecated.
                    #   Use kmutil create --update-install instead'
                    args[1] = "create"

            if self.needs_kmutil_exemptions is True:
                # When installing to '/Library/Extensions', following args skip kext consent
                # prompt in System Preferences when SIP's disabled
                logging.info(" (You will get a prompt by System Preferences, ignore for now)")
                args.append("--no-authentication")
                args.append("--no-authorization")
        else:
            # Catalina and older: rebuild the prelinkedkernel instead
            args = ["kextcache", "-i", f"{self.mount_location}/"]

        result = utilities.elevated(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

        # kextcache notes:
        # - kextcache always returns 0, even if it fails
        # - Check the output for 'KernelCache ID' to see if the cache was successfully rebuilt
        # kmutil notes:
        # - will return 71 on failure to build KCs
        # - will return 31 on 'No binaries or codeless kexts were provided'
        # - will return -10 if the volume is missing (ie. unmounted by another process)
        if result.returncode != 0 or (self.constants.detected_os < os_data.os_data.catalina and "KernelCache ID" not in result.stdout.decode()):
            logging.info("- Unable to build new kernel cache")
            logging.info(f"\nReason for Patch Failure ({result.returncode}):")
            logging.info(result.stdout.decode())
            logging.info("")
            logging.info("\nPlease reboot the machine to avoid potential issues rerunning the patcher")
            return False

        if self.skip_root_kmutil_requirement is True:
            # Force rebuild the Auxiliary KC by restarting the daemons that own it
            result = utilities.elevated(["/usr/bin/killall", "syspolicyd", "kernelmanagerd"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            if result.returncode != 0:
                logging.info("- Unable to remove kernel extension policy files")
                logging.info(f"\nReason for Patch Failure ({result.returncode}):")
                logging.info(result.stdout.decode())
                logging.info("")
                logging.info("\nPlease reboot the machine to avoid potential issues rerunning the patcher")
                return False

            # Drop the stale kext policy database so the AuxKC is regenerated cleanly
            for file in ["KextPolicy", "KextPolicy-shm", "KextPolicy-wal"]:
                self._remove_file("/private/var/db/SystemPolicyConfiguration/", file)
        else:
            # Install RSRHelper utility to handle desynced KCs
            sys_patch_helpers.SysPatchHelpers(self.constants).install_rsr_repair_binary()

        logging.info("- Successfully built new kernel cache")
        return True
    def _create_new_apfs_snapshot(self) -> bool:
        """
        Creates a new APFS snapshot of the root volume

        Returns:
            bool: True if snapshot was created (or not required), False if not
        """
        if self.root_supports_snapshot is True:
            logging.info("- Creating new APFS snapshot")
            bless = utilities.elevated(
                [
                    "bless",
                    "--folder", f"{self.mount_location}/System/Library/CoreServices",
                    "--bootefi", "--create-snapshot"
                ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            )
            if bless.returncode != 0:
                logging.info("- Unable to create new snapshot")
                logging.info("Reason for snapshot failure:")
                logging.info(bless.stdout.decode())
                if "Can't use last-sealed-snapshot or create-snapshot on non system volume" in bless.stdout.decode():
                    logging.info("- This is an APFS bug with Monterey and newer! Perform a clean installation to ensure your APFS volume is built correctly")
                return False
            # Snapshot sealed: the writable mount is no longer needed
            self._unmount_drive()
        return True
def _unmount_drive(self) -> None:
"""
Unmount root volume
"""
if self.root_mount_path:
logging.info("- Unmounting Root Volume (Don't worry if this fails)")
utilities.elevated(["/usr/sbin/diskutil", "unmount", self.root_mount_path], stdout=subprocess.PIPE).stdout.decode().strip().encode()
else:
logging.info("- Skipping Root Volume unmount")
def _rebuild_dyld_shared_cache(self) -> None:
"""
Rebuild the dyld shared cache
Only required on Mojave and older
"""
if self.constants.detected_os > os_data.os_data.catalina:
return
logging.info("- Rebuilding dyld shared cache")
utilities.process_status(utilities.elevated(["update_dyld_shared_cache", "-root", f"{self.mount_location}/"]))
def _update_preboot_kernel_cache(self) -> None:
"""
Update the preboot kernel cache
Only required on Catalina
"""
if self.constants.detected_os == os_data.os_data.catalina:
logging.info("- Rebuilding preboot kernel cache")
utilities.process_status(utilities.elevated(["kcditto"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
def _clean_skylight_plugins(self) -> None:
"""
Clean non-Metal's SkylightPlugins folder
"""
if (Path(self.mount_application_support) / Path("SkyLightPlugins/")).exists():
logging.info("- Found SkylightPlugins folder, removing old plugins")
utilities.process_status(utilities.elevated(["/bin/rm", "-Rf", f"{self.mount_application_support}/SkyLightPlugins"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
utilities.process_status(utilities.elevated(["/bin/mkdir", f"{self.mount_application_support}/SkyLightPlugins"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
else:
logging.info("- Creating SkylightPlugins folder")
utilities.process_status(utilities.elevated(["/bin/mkdir", "-p", f"{self.mount_application_support}/SkyLightPlugins/"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
def _delete_nonmetal_enforcement(self) -> None:
"""
Remove defaults related to forced OpenGL rendering
Primarily for development purposes
"""
for arg in ["useMetal", "useIOP"]:
result = subprocess.run(["/usr/bin/defaults", "read", "/Library/Preferences/com.apple.CoreDisplay", arg], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.decode("utf-8").strip()
if result in ["0", "false", "1", "true"]:
logging.info(f"- Removing non-Metal Enforcement Preference: {arg}")
utilities.elevated(["/usr/bin/defaults", "delete", "/Library/Preferences/com.apple.CoreDisplay", arg])
def _clean_auxiliary_kc(self) -> None:
"""
Clean the Auxiliary Kernel Collection
Logic:
When reverting root volume patches, the AuxKC will still retain the UUID
it was built against. Thus when Boot/SysKC are reverted, Aux will break
To resolve this, delete all installed kexts in /L*/E* and rebuild the AuxKC
We can verify our binaries based off the OpenCore-Legacy-Patcher.plist file
"""
if self.constants.detected_os < os_data.os_data.big_sur:
return
logging.info("- Cleaning Auxiliary Kernel Collection")
oclp_path = "/System/Library/CoreServices/OpenCore-Legacy-Patcher.plist"
if Path(oclp_path).exists():
oclp_plist_data = plistlib.load(Path(oclp_path).open("rb"))
for key in oclp_plist_data:
if isinstance(oclp_plist_data[key], (bool, int)):
continue
if "Install" not in oclp_plist_data[key]:
continue
for location in oclp_plist_data[key]["Install"]:
if not location.endswith("Extensions"):
continue
for file in oclp_plist_data[key]["Install"][location]:
if not file.endswith(".kext"):
continue
self._remove_file("/Library/Extensions", file)
# Handle situations where users migrated from older OSes with a lot of garbage in /L*/E*
# ex. Nvidia Web Drivers, NetUSB, dosdude1's patches, etc.
# Move if file's age is older than October 2021 (year before Ventura)
if self.constants.detected_os < os_data.os_data.ventura:
return
relocation_path = "/Library/Relocated Extensions"
if not Path(relocation_path).exists():
utilities.elevated(["/bin/mkdir", relocation_path])
for file in Path("/Library/Extensions").glob("*.kext"):
try:
if datetime.fromtimestamp(file.stat().st_mtime) < datetime(2021, 10, 1):
logging.info(f" - Relocating {file.name} kext to {relocation_path}")
if Path(relocation_path) / Path(file.name).exists():
utilities.elevated(["/bin/rm", "-Rf", relocation_path / Path(file.name)])
utilities.elevated(["/bin/mv", file, relocation_path])
except:
# Some users have the most cursed /L*/E* folders
# ex. Symlinks pointing to symlinks pointing to dead files
pass
def _write_patchset(self, patchset: dict) -> None:
"""
Write patchset information to Root Volume
Parameters:
patchset (dict): Patchset information (generated by GenerateRootPatchSets)
"""
destination_path = f"{self.mount_location}/System/Library/CoreServices"
file_name = "OpenCore-Legacy-Patcher.plist"
destination_path_file = f"{destination_path}/{file_name}"
if sys_patch_helpers.SysPatchHelpers(self.constants).generate_patchset_plist(patchset, file_name, self.kdk_path):
logging.info("- Writing patchset information to Root Volume")
if Path(destination_path_file).exists():
utilities.process_status(utilities.elevated(["/bin/rm", destination_path_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
utilities.process_status(utilities.elevated(["/bin/cp", f"{self.constants.payload_path}/{file_name}", destination_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
    def _add_auxkc_support(self, install_file: str, source_folder_path: str, install_patch_directory: str, destination_folder_path: str) -> str:
        """
        Patch provided Kext to support Auxiliary Kernel Collection

        Logic:
            In macOS Ventura, KDKs are required to build new Boot and System KCs
            However for some patch sets, we're able to use the Auxiliary KCs with '/Library/Extensions'

            kernelmanagerd determines which kext is installed by their 'OSBundleRequired' entry
            If a kext is labeled as 'OSBundleRequired: Root' or 'OSBundleRequired: Safe Boot',
            kernelmanagerd will require the kext to be installed in the Boot/SysKC

            Additionally, kexts starting with 'com.apple.' are not natively allowed to be installed
            in the AuxKC. So we need to explicitly set our 'OSBundleRequired' to 'Auxiliary'

        Parameters:
            install_file (str): Kext file name
            source_folder_path (str): Source folder path
            install_patch_directory (str): Patch directory
            destination_folder_path (str): Destination folder path

        Returns:
            str: Updated destination folder path
        """
        # Only applicable when AuxKC-only patching is active on Ventura+, and only
        # for kexts originally destined for /System/Library/Extensions
        if self.skip_root_kmutil_requirement is False:
            return destination_folder_path
        if not install_file.endswith(".kext"):
            return destination_folder_path
        if install_patch_directory != "/System/Library/Extensions":
            return destination_folder_path
        if self.constants.detected_os < os_data.os_data.ventura:
            return destination_folder_path

        # Redirect install into the data volume's /Library/Extensions (AuxKC source)
        updated_install_location = str(self.mount_location_data) + "/Library/Extensions"

        logging.info(f" - Adding AuxKC support to {install_file}")
        plist_path = Path(Path(source_folder_path) / Path(install_file) / Path("Contents/Info.plist"))
        plist_data = plistlib.load((plist_path).open("rb"))

        # Check if we need to update the 'OSBundleRequired' entry
        # (only 'com.apple.'-prefixed bundles need the 'Auxiliary' override - see Logic above)
        if not plist_data["CFBundleIdentifier"].startswith("com.apple."):
            return updated_install_location
        if "OSBundleRequired" in plist_data:
            if plist_data["OSBundleRequired"] == "Auxiliary":
                # Already patched; nothing to rewrite
                return updated_install_location

        plist_data["OSBundleRequired"] = "Auxiliary"
        plistlib.dump(plist_data, plist_path.open("wb"))

        # Modified kexts may require user approval in System Preferences
        self._check_kexts_needs_authentication(install_file)

        return updated_install_location
    def _check_kexts_needs_authentication(self, kext_name: str) -> None:
        """
        Verify whether the user needs to authenticate in System Preferences
        Sets 'needs_to_open_preferences' to True if the kext is not in the AuxKC

        Logic:
            Under 'private/var/db/KernelManagement/AuxKC/CurrentAuxKC/com.apple.kcgen.instructions.plist'
            ["kextsToBuild"][i]:
                ["bundlePathMainOS"] = /Library/Extensions/Test.kext
                ["cdHash"] =           Bundle's CDHash (random on ad-hoc signed, static on dev signed)
                ["teamID"] =           Team ID (blank on ad-hoc signed)
            To grab the CDHash of a kext, run 'codesign -dvvv <kext_path>'

        Parameters:
            kext_name (str): Name of the kext to check
        """
        try:
            # NOTE(review): joining a Path with an absolute right-hand operand discards the
            # left side in pathlib, so mount_location_data is effectively ignored here -
            # confirm whether the live '/private/var/...' path is intentional
            aux_cache_path = Path(self.mount_location_data) / Path("/private/var/db/KernelExtensionManagement/AuxKC/CurrentAuxKC/com.apple.kcgen.instructions.plist")
            if aux_cache_path.exists():
                aux_cache_data = plistlib.load((aux_cache_path).open("rb"))
                for kext in aux_cache_data["kextsToBuild"]:
                    if "bundlePathMainOS" in aux_cache_data["kextsToBuild"][kext]:
                        if aux_cache_data["kextsToBuild"][kext]["bundlePathMainOS"] == f"/Library/Extensions/{kext_name}":
                            # Already present in the AuxKC: no user approval needed
                            return
        except PermissionError:
            # Reading the AuxKC instructions requires elevated rights; assume approval needed
            pass

        logging.info(f" - {kext_name} requires authentication in System Preferences")
        self.constants.needs_to_open_preferences = True # Notify in GUI to open System Preferences
def _patch_root_vol(self):
"""
Patch root volume
"""
logging.info(f"- Running patches for {self.model}")
if self.patch_set_dictionary != {}:
self._execute_patchset(self.patch_set_dictionary)
else:
self._execute_patchset(sys_patch_generate.GenerateRootPatchSets(self.computer.real_model, self.constants, self.hardware_details).patchset)
if self.constants.wxpython_variant is True and self.constants.detected_os >= os_data.os_data.big_sur:
needs_daemon = False
if self.constants.detected_os >= os_data.os_data.ventura and self.skip_root_kmutil_requirement is False:
needs_daemon = True
sys_patch_auto.AutomaticSysPatch(self.constants).install_auto_patcher_launch_agent(kdk_caching_needed=needs_daemon)
self._rebuild_root_volume()
    def _execute_patchset(self, required_patches: dict) -> None:
        """
        Executes provided patchset

        Parameters:
            required_patches (dict): Patchset to execute (generated by sys_patch_generate.GenerateRootPatchSets)
        """
        source_files_path = str(self.constants.payload_local_binaries_root_path)
        self._preflight_checks(required_patches, source_files_path)

        for patch in required_patches:
            logging.info("- Installing Patchset: " + patch)

            # Phase 1: removals ('Remove' targets the root volume, 'Remove Non-Root' the data volume)
            for method_remove in ["Remove", "Remove Non-Root"]:
                if method_remove in required_patches[patch]:
                    for remove_patch_directory in required_patches[patch][method_remove]:
                        logging.info("- Remove Files at: " + remove_patch_directory)
                        for remove_patch_file in required_patches[patch][method_remove][remove_patch_directory]:
                            if method_remove == "Remove":
                                destination_folder_path = str(self.mount_location) + remove_patch_directory
                            else:
                                destination_folder_path = str(self.mount_location_data) + remove_patch_directory
                            self._remove_file(destination_folder_path, remove_patch_file)

            # Phase 2: installs. Iterate over list() copies since _add_auxkc_support
            # may redirect entries, mutating the nested dicts mid-loop.
            for method_install in ["Install", "Install Non-Root"]:
                if method_install in required_patches[patch]:
                    for install_patch_directory in list(required_patches[patch][method_install]):
                        logging.info(f"- Handling Installs in: {install_patch_directory}")
                        for install_file in list(required_patches[patch][method_install][install_patch_directory]):
                            source_folder_path = source_files_path + "/" + required_patches[patch][method_install][install_patch_directory][install_file] + install_patch_directory
                            if method_install == "Install":
                                destination_folder_path = str(self.mount_location) + install_patch_directory
                            else:
                                if install_patch_directory == "/Library/Extensions":
                                    # Data-volume kext installs require an AuxKC rebuild
                                    self.needs_kmutil_exemptions = True
                                    self._check_kexts_needs_authentication(install_file)
                                destination_folder_path = str(self.mount_location_data) + install_patch_directory

                            updated_destination_folder_path = self._add_auxkc_support(install_file, source_folder_path, install_patch_directory, destination_folder_path)
                            if destination_folder_path != updated_destination_folder_path:
                                # Update required_patches to reflect the new destination folder path
                                if updated_destination_folder_path not in required_patches[patch][method_install]:
                                    required_patches[patch][method_install].update({updated_destination_folder_path: {}})
                                required_patches[patch][method_install][updated_destination_folder_path].update({install_file: required_patches[patch][method_install][install_patch_directory][install_file]})
                                required_patches[patch][method_install][install_patch_directory].pop(install_file)

                                destination_folder_path = updated_destination_folder_path

                            self._install_new_file(source_folder_path, destination_folder_path, install_file)

            # Phase 3: post-install processes declared by the patchset
            if "Processes" in required_patches[patch]:
                for process in required_patches[patch]["Processes"]:
                    # Some processes need sudo, however we cannot directly call sudo in some scenarios
                    # Instead, call elevated function if string's boolean is True
                    if required_patches[patch]["Processes"][process] is True:
                        logging.info(f"- Running Process as Root:\n{process}")
                        utilities.process_status(utilities.elevated(process.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
                    else:
                        logging.info(f"- Running Process:\n{process}")
                        utilities.process_status(subprocess.run(process, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True))

        # Patchset-specific workarounds
        if any(x in required_patches for x in ["AMD Legacy GCN", "AMD Legacy Polaris", "AMD Legacy Vega"]):
            sys_patch_helpers.SysPatchHelpers(self.constants).disable_window_server_caching()
        if "Metal 3802 Common Extended" in required_patches:
            sys_patch_helpers.SysPatchHelpers(self.constants).patch_gpu_compiler_libraries(mount_point=self.mount_location)

        self._write_patchset(required_patches)
    def _preflight_checks(self, required_patches: dict, source_files_path: str) -> None:
        """
        Runs preflight checks before patching

        Parameters:
            required_patches (dict): Patchset dictionary (from sys_patch_generate.GenerateRootPatchSets)
            source_files_path (str): Path to the source files (PatcherSupportPkg)
                (annotation corrected from Path: the caller passes a str and
                this function builds paths via string concatenation)
        """
        logging.info("- Running Preflight Checks before patching")

        # Make sure old SkyLight plugins aren't being used
        self._clean_skylight_plugins()
        # Make sure non-Metal Enforcement preferences are not present
        self._delete_nonmetal_enforcement()
        # Make sure we clean old kexts in /L*/E* that are not in the patchset
        self._clean_auxiliary_kc()

        # Make sure SNB kexts are compatible with the host
        if "Intel Sandy Bridge" in required_patches:
            sys_patch_helpers.SysPatchHelpers(self.constants).snb_board_id_patch(source_files_path)

        for patch in required_patches:
            # Check if all files are present
            for method_type in ["Install", "Install Non-Root"]:
                if method_type in required_patches[patch]:
                    for install_patch_directory in required_patches[patch][method_type]:
                        for install_file in required_patches[patch][method_type][install_patch_directory]:
                            source_file = source_files_path + "/" + required_patches[patch][method_type][install_patch_directory][install_file] + install_patch_directory + "/" + install_file
                            if not Path(source_file).exists():
                                raise Exception(f"Failed to find {source_file}")

        # Ensure KDK is properly installed
        self._merge_kdk_with_root(save_hid_cs=True if "Legacy USB 1.1" in required_patches else False)

        logging.info("- Finished Preflight, starting patching")
def _install_new_file(self, source_folder: Path, destination_folder: Path, file_name: str) -> None:
    """
    Installs a new file to the destination folder

    File handling logic:
    - .frameworks are merged with the destination folder
    - Other files are deleted and replaced (ex. .kexts, .apps)

    Parameters:
        source_folder      (Path): Path to the source folder
        destination_folder (Path): Path to the destination folder
        file_name          (str):  Name of the file to install

    NOTE(review): despite the Path annotations, the '+' concatenation below
    requires str arguments at runtime - confirm with callers
    """
    file_name_str = str(file_name)

    if not Path(destination_folder).exists():
        # Fix: the original message blamed the source folder, but it is the
        # destination folder whose existence is checked here
        logging.info(f" - Skipping {file_name}, cannot locate {destination_folder}")
        return

    if file_name_str.endswith(".framework"):
        # Frameworks are merged (rsync) rather than replaced, preserving
        # pre-existing bundle contents
        logging.info(f" - Installing: {file_name}")
        utilities.elevated(["rsync", "-r", "-i", "-a", f"{source_folder}/{file_name}", f"{destination_folder}/"], stdout=subprocess.PIPE)
        self._fix_permissions(destination_folder + "/" + file_name)
    elif Path(source_folder + "/" + file_name_str).is_dir():
        # Applicable for .kext, .app, .plugin, .bundle, all of which are directories
        if Path(destination_folder + "/" + file_name).exists():
            logging.info(f" - Found existing {file_name}, overwriting...")
            utilities.process_status(utilities.elevated(["/bin/rm", "-R", f"{destination_folder}/{file_name}"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
        else:
            logging.info(f" - Installing: {file_name}")
        utilities.process_status(utilities.elevated(["/bin/cp", "-R", f"{source_folder}/{file_name}", destination_folder], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
        self._fix_permissions(destination_folder + "/" + file_name)
    else:
        # Assume it's an individual file, replace as normal
        if Path(destination_folder + "/" + file_name).exists():
            logging.info(f" - Found existing {file_name}, overwriting...")
            utilities.process_status(utilities.elevated(["/bin/rm", f"{destination_folder}/{file_name}"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
        else:
            logging.info(f" - Installing: {file_name}")
        utilities.process_status(utilities.elevated(["/bin/cp", f"{source_folder}/{file_name}", destination_folder], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
        self._fix_permissions(destination_folder + "/" + file_name)
def _remove_file(self, destination_folder: Path, file_name: str) -> None:
    """
    Delete a file or directory from the destination folder, if present

    Parameters:
        destination_folder (Path): Path to the destination folder
        file_name (str): Name of the file to remove
    """
    target = Path(destination_folder + "/" + file_name)
    if not target.exists():
        return

    logging.info(f" - Removing: {file_name}")
    rm_args = ["/bin/rm", f"{destination_folder}/{file_name}"]
    if target.is_dir():
        # Directories (.kext, .app, etc.) need a recursive delete
        rm_args.insert(1, "-R")
    utilities.process_status(utilities.elevated(rm_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
def _fix_permissions(self, destination_file: Path) -> None:
    """
    Reset ownership and mode (root:wheel, 755) on a patched file or directory
    """
    # Recurse only when the target is a directory
    flags = ["-Rf"] if Path(destination_file).is_dir() else []
    for tool, value in (("chmod", "755"), ("chown", "root:wheel")):
        utilities.process_status(
            utilities.elevated([tool, *flags, value, destination_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        )
def _check_files(self) -> bool:
    """
    Check if all files are present (primarily PatcherSupportPkg resources)

    Mounts Universal-Binaries.dmg when only the disk image is present, and
    optionally decrypts and merges the DortaniaInternal overlay for enrolled
    developers (GUI only).

    Returns:
        bool: True if all files are present, False otherwise
    """
    # Fast path: resources already extracted/mounted on disk
    if Path(self.constants.payload_local_binaries_root_path).exists():
        logging.info("- Local PatcherSupportPkg resources available, continuing...")
        return True

    if Path(self.constants.payload_local_binaries_root_path_dmg).exists():
        logging.info("- Local PatcherSupportPkg resources available, mounting...")

        # -shadow keeps the DMG itself read-only while allowing writes to the mount
        output = subprocess.run(
            [
                "/usr/bin/hdiutil", "attach", "-noverify", f"{self.constants.payload_local_binaries_root_path_dmg}",
                "-mountpoint", Path(self.constants.payload_path / Path("Universal-Binaries")),
                "-nobrowse",
                "-shadow", Path(self.constants.payload_path / Path("Universal-Binaries_overlay")),
                "-passphrase", "password"
            ],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        if output.returncode != 0:
            logging.info("- Failed to mount Universal-Binaries.dmg")
            logging.info(f"Output: {output.stdout.decode()}")
            logging.info(f"Return Code: {output.returncode}")
            return False

        logging.info("- Mounted Universal-Binaries.dmg")

        # Developer-only overlay, gated on the ~/.dortania_developer marker file
        if self.constants.cli_mode is False and Path(self.constants.overlay_psp_path_dmg).exists() and Path("~/.dortania_developer").expanduser().exists():
            # AppleScript icon paths use HFS-style colon separators (no leading colon)
            icon_path = str(self.constants.app_icon_path).replace("/", ":")[1:]
            msg = "Welcome to the DortaniaInternal Program, please provided the decryption key to access internal resources. Press cancel to skip."
            # Use the cached key when present; otherwise prompt in the loop below
            password = Path("~/.dortania_developer_key").expanduser().read_text().strip() if Path("~/.dortania_developer_key").expanduser().exists() else ""
            # Up to 3 passphrase attempts; cancelling the dialog raises and exits the loop
            for i in range(3):
                try:
                    if password == "":
                        password = applescript.AppleScript(
                            f"""
                            set theResult to display dialog "{msg}" default answer "" with hidden answer with title "OpenCore Legacy Patcher" with icon file "{icon_path}"
                            return the text returned of theResult
                            """
                        ).run()
                    result = subprocess.run(
                        [
                            "/usr/bin/hdiutil", "attach", "-noverify", f"{self.constants.overlay_psp_path_dmg}",
                            "-mountpoint", Path(self.constants.payload_path / Path("DortaniaInternal")),
                            "-nobrowse",
                            "-passphrase", password
                        ],
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT
                    )
                    if result.returncode == 0:
                        logging.info("- Mounted DortaniaInternal resources")
                        # Merge internal resources on top of Universal-Binaries
                        result = subprocess.run(
                            [
                                "/usr/bin/ditto", f"{self.constants.payload_path / Path('DortaniaInternal')}", f"{self.constants.payload_path / Path('Universal-Binaries')}"
                            ],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
                        )
                        if result.returncode == 0:
                            return True
                        logging.info("- Failed to merge DortaniaInternal resources")
                        logging.info(f"Output: {result.stdout.decode()}")
                        logging.info(f"Return Code: {result.returncode}")
                        return False

                    logging.info("- Failed to mount DortaniaInternal resources")
                    logging.info(f"Output: {result.stdout.decode()}")
                    logging.info(f"Return Code: {result.returncode}")

                    # Non-authentication failures are not retried
                    if "Authentication error" not in result.stdout.decode():
                        try:
                            # Display that the disk image might be corrupted
                            applescript.AppleScript(
                                f"""
                                display dialog "Failed to mount DortaniaInternal resources, please file an internal radar:\n\n{result.stdout.decode()}" with title "OpenCore Legacy Patcher" with icon file "{icon_path}"
                                """
                            ).run()
                            return False
                        except Exception as e:
                            pass
                        break

                    # Wrong passphrase: clear it so the next iteration re-prompts
                    msg = f"Decryption failed, please try again. {2 - i} attempts remaining. "
                    password = ""
                    if i == 2:
                        applescript.AppleScript(
                            f"""
                            display dialog "Failed to mount DortaniaInternal resources, too many incorrect passwords. If this continues with the correct decryption key, please file an internal radar." with title "OpenCore Legacy Patcher" with icon file "{icon_path}"
                            """
                        ).run()
                        return False
                except Exception as e:
                    # User cancelled the passphrase dialog (or AppleScript failed):
                    # continue without the internal overlay
                    break

        return True

    logging.info("- PatcherSupportPkg resources missing, Patcher likely corrupted!!!")
    return False
# Entry Function
def start_patch(self):
    """
    Entry function for the patching process

    Generates the applicable patchset, verifies patching is possible and
    resources are present, then mounts the root volume and applies patches.
    """
    logging.info("- Starting Patch Process")
    logging.info(f"- Determining Required Patch set for Darwin {self.constants.detected_os}")

    self.patch_set_dictionary = sys_patch_generate.GenerateRootPatchSets(self.computer.real_model, self.constants, self.hardware_details).patchset

    if self.patch_set_dictionary == {}:
        logging.info("- No Root Patches required for your machine!")
        return

    logging.info("- Verifying whether Root Patching possible")
    # Only print detailed errors in TUI mode; the GUI surfaces them itself
    if sys_patch_detect.DetectRootPatch(self.computer.real_model, self.constants).verify_patch_allowed(print_errors=not self.constants.wxpython_variant) is True:
        logging.info("- Patcher is capable of patching")
        if self._check_files():
            if self._mount_root_vol() is True:
                self._patch_root_vol()
            else:
                logging.info("- Recommend rebooting the machine and trying to patch again")
def start_unpatch(self) -> None:
    """
    Entry function for unpatching the root volume
    """
    logging.info("- Starting Unpatch Process")
    detector = sys_patch_detect.DetectRootPatch(self.computer.real_model, self.constants)
    if detector.verify_patch_allowed(print_errors=True) is not True:
        return
    if self._mount_root_vol() is not True:
        logging.info("- Recommend rebooting the machine and trying to patch again")
        return
    self._unpatch_root_vol()

View File

@@ -0,0 +1,462 @@
"""
sys_patch_auto.py: Library of functions for launch services, including automatic patching
"""
import wx
import wx.html2
import hashlib
import logging
import plistlib
import requests
import markdown2
import subprocess
import webbrowser
from pathlib import Path
from .. import constants
from ..datasets import css_data
from ..sys_patch import sys_patch_detect
from ..wx_gui import (
gui_entry,
gui_support
)
from ..utilities import (
utilities,
updates,
global_settings,
network_handler
)
class AutomaticSysPatch:
    """
    Library of functions for launch agent, including automatic patching
    """

    def __init__(self, global_constants: constants.Constants):
        # Shared patcher configuration and detected-hardware state
        self.constants: constants.Constants = global_constants
def start_auto_patch(self):
    """
    Initiates automatic patching

    Auto Patching's main purpose is to try and tell the user they're missing root patches
    New users may not realize OS updates remove our patches, so we try and run when necessary

    Conditions for running:
    - Verify running GUI (TUI users can write their own scripts)
    - Verify the Snapshot Seal is intact (if not, assume user is running patches)
    - Verify this model needs patching (if not, assume user upgraded hardware and OCLP was not removed)
    - Verify there are no updates for OCLP (ensure we have the latest patch sets)

    If all these tests pass, start Root Patcher
    """
    logging.info("- Starting Automatic Patching")
    if self.constants.wxpython_variant is False:
        logging.info("- Auto Patch option is not supported on TUI, please use GUI")
        return

    # Fix: renamed local 'dict' -> 'update_info' to stop shadowing the builtin
    update_info = updates.CheckBinaryUpdates(self.constants).check_binary_updates()
    if update_info:
        version = update_info["Version"]
        logging.info(f"- Found new version: {version}")

        app = wx.App()
        # Top-level frame parents the dialog below (only the dialog is shown)
        mainframe = wx.Frame(None, -1, "OpenCore Legacy Patcher")

        ID_GITHUB = wx.NewId()
        ID_UPDATE = wx.NewId()

        url = "https://api.github.com/repos/dortania/OpenCore-Legacy-Patcher/releases/latest"
        response = requests.get(url).json()
        try:
            changelog = response["body"].split("## Asset Information")[0]
        except (KeyError, TypeError):
            # Fix: narrowed from a bare 'except:'. If the user constantly checks
            # for updates, GitHub will rate limit them and 'body' is absent.
            changelog = """## Unable to fetch changelog

Please check the Github page for more information about this release."""

        html_markdown = markdown2.markdown(changelog, extras=["tables"])
        html_css = css_data.updater_css

        frame = wx.Dialog(None, -1, title="", size=(650, 500))
        frame.SetMinSize((650, 500))
        frame.SetWindowStyle(wx.STAY_ON_TOP)
        panel = wx.Panel(frame)

        self.title_text = wx.StaticText(panel, label="A new version of OpenCore Legacy Patcher is available!")
        self.description = wx.StaticText(panel, label=f"OpenCore Legacy Patcher {version} is now available - You have {self.constants.patcher_version}{' (Nightly)' if not self.constants.commit_info[0].startswith('refs/tags') else ''}. Would you like to update?")
        self.title_text.SetFont(gui_support.font_factory(19, wx.FONTWEIGHT_BOLD))
        self.description.SetFont(gui_support.font_factory(13, wx.FONTWEIGHT_NORMAL))

        self.web_view = wx.html2.WebView.New(panel, style=wx.BORDER_SUNKEN)
        html_code = f'''
<html>
    <head>
        <style>
            {html_css}
        </style>
    </head>
    <body class="markdown-body">
        {html_markdown.replace("<a href=", "<a target='_blank' href=")}
    </body>
</html>
'''
        self.web_view.SetPage(html_code, "")
        # Open changelog links in the system browser, not the embedded webview
        self.web_view.Bind(wx.html2.EVT_WEBVIEW_NEWWINDOW, self._onWebviewNav)
        self.web_view.EnableContextMenu(False)

        self.close_button = wx.Button(panel, label="Ignore")
        self.close_button.Bind(wx.EVT_BUTTON, lambda event: frame.EndModal(wx.ID_CANCEL))
        self.view_button = wx.Button(panel, ID_GITHUB, label="View on GitHub")
        self.view_button.Bind(wx.EVT_BUTTON, lambda event: frame.EndModal(ID_GITHUB))
        self.install_button = wx.Button(panel, label="Download and Install")
        self.install_button.Bind(wx.EVT_BUTTON, lambda event: frame.EndModal(ID_UPDATE))
        self.install_button.SetDefault()

        buttonsizer = wx.BoxSizer(wx.HORIZONTAL)
        buttonsizer.Add(self.close_button, 0, wx.ALIGN_CENTRE | wx.RIGHT, 5)
        buttonsizer.Add(self.view_button, 0, wx.ALIGN_CENTRE | wx.LEFT|wx.RIGHT, 5)
        buttonsizer.Add(self.install_button, 0, wx.ALIGN_CENTRE | wx.LEFT, 5)

        # Fix: removed the earlier, never-attached duplicate BoxSizer
        # (it was created, given a spacer, then discarded by this reassignment)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.title_text, 0, wx.ALIGN_CENTRE | wx.TOP, 20)
        sizer.Add(self.description, 0, wx.ALIGN_CENTRE | wx.BOTTOM, 20)
        sizer.Add(self.web_view, 1, wx.EXPAND | wx.LEFT|wx.RIGHT, 10)
        sizer.Add(buttonsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 20)
        panel.SetSizer(sizer)
        frame.Centre()

        result = frame.ShowModal()

        if result == ID_GITHUB:
            webbrowser.open(update_info["Github Link"])
        elif result == ID_UPDATE:
            gui_entry.EntryPoint(self.constants).start(entry=gui_entry.SupportedEntryPoints.UPDATE_APP)
        return

    if utilities.check_seal() is True:
        logging.info("- Detected Snapshot seal intact, detecting patches")
        patches = sys_patch_detect.DetectRootPatch(self.constants.computer.real_model, self.constants).detect_patch_set()
        # Treat the set as empty when no real (non-Settings/Validation) patch applies
        if not any(not patch.startswith("Settings") and not patch.startswith("Validation") and patches[patch] is True for patch in patches):
            patches = []
        if patches:
            logging.info("- Detected applicable patches, determining whether possible to patch")
            if patches["Validation: Patching Possible"] is False:
                logging.info("- Cannot run patching")
                return

            logging.info("- Determined patching is possible, checking for OCLP updates")
            patch_string = ""
            for patch in patches:
                if patches[patch] is True and not patch.startswith("Settings") and not patch.startswith("Validation"):
                    patch_string += f"- {patch}\n"

            logging.info("- No new binaries found on Github, proceeding with patching")
            if self.constants.launcher_script is None:
                args_string = f"'{self.constants.launcher_binary}' --gui_patch"
            else:
                args_string = f"{self.constants.launcher_binary} {self.constants.launcher_script} --gui_patch"

            warning_str = ""
            if network_handler.NetworkUtilities("https://api.github.com/repos/dortania/OpenCore-Legacy-Patcher/releases/latest").verify_network_connection() is False:
                warning_str = f"""\n\nWARNING: We're unable to verify whether there are any new releases of OpenCore Legacy Patcher on Github. Be aware that you may be using an outdated version for this OS. If you're unsure, verify on Github that OpenCore Legacy Patcher {self.constants.patcher_version} is the latest official release"""

            args = [
                "/usr/bin/osascript",
                "-e",
                f"""display dialog "OpenCore Legacy Patcher has detected you're running without Root Patches, and would like to install them.\n\nmacOS wipes all root patches during OS installs and updates, so they need to be reinstalled.\n\nFollowing Patches have been detected for your system: \n{patch_string}\nWould you like to apply these patches?{warning_str}" """
                f'with icon POSIX file "{self.constants.app_icon_path}"',
            ]
            # osascript exits 0 when the user clicks the affirmative button
            output = subprocess.run(
                args,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT
            )
            if output.returncode == 0:
                # User accepted: relaunch ourselves with admin rights to patch
                args = [
                    "/usr/bin/osascript",
                    "-e",
                    f'''do shell script "{args_string}"'''
                    f' with prompt "OpenCore Legacy Patcher would like to patch your root volume"'
                    " with administrator privileges"
                    " without altering line endings"
                ]
                subprocess.run(
                    args,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT
                )
            return
        else:
            logging.info("- No patches detected")
    else:
        logging.info("- Detected Snapshot seal not intact, skipping")

    if self._determine_if_versions_match():
        self._determine_if_boot_matches()
def _onWebviewNav(self, event):
    """
    Handle webview "new window" navigation by opening the target URL
    in the user's default browser
    """
    webbrowser.open(event.GetURL())
def _determine_if_versions_match(self):
    """
    Determine if the booted version of OCLP matches the installed version
    ie. Installed app is 0.2.0, but EFI version is 0.1.0

    On a mismatch where the booted build is newer, prompts the user to
    rebuild/install OpenCore and launches the GUI build menu on acceptance.

    Returns:
        bool: True if versions match, False if not
    """
    logging.info("- Checking booted vs installed OCLP Build")
    if self.constants.computer.oclp_version is None:
        # No booted-build version recorded; nothing to compare against
        logging.info("- Booted version not found")
        return True

    if self.constants.computer.oclp_version == self.constants.patcher_version:
        logging.info("- Versions match")
        return True

    if self.constants.special_build is True:
        # Version doesn't match and we're on a special build
        # Special builds don't have good ways to compare versions
        logging.info("- Special build detected, assuming installed is older")
        return False

    # Check if installed version is newer than booted version
    if updates.CheckBinaryUpdates(self.constants).check_if_newer(self.constants.computer.oclp_version):
        logging.info("- Installed version is newer than booted version")
        return True

    args = [
        "/usr/bin/osascript",
        "-e",
        f"""display dialog "OpenCore Legacy Patcher has detected that you are booting {'a different' if self.constants.special_build else 'an outdated'} OpenCore build\n- Booted: {self.constants.computer.oclp_version}\n- Installed: {self.constants.patcher_version}\n\nWould you like to update the OpenCore bootloader?" """
        f'with icon POSIX file "{self.constants.app_icon_path}"',
    ]
    # osascript exits 0 when the user clicks the affirmative button
    output = subprocess.run(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT
    )
    if output.returncode == 0:
        logging.info("- Launching GUI's Build/Install menu")
        self.constants.start_build_install = True
        gui_entry.EntryPoint(self.constants).start(entry=gui_entry.SupportedEntryPoints.BUILD_OC)

    return False
def _determine_if_boot_matches(self):
    """
    Determine if the boot drive matches the macOS drive
    ie. Booted from USB, but macOS is on internal disk

    Goal of this function is to determine whether the user
    is using a USB drive to Boot OpenCore but macOS does not
    reside on the same drive as the USB.

    If we determine them to be mismatched, notify the user
    and ask if they want to install to disk.
    """
    logging.info("- Determining if macOS drive matches boot drive")

    should_notify = global_settings.GlobalEnviromentSettings().read_property("AutoPatch_Notify_Mismatched_Disks")
    if should_notify is False:
        logging.info("- Skipping due to user preference")
        return
    if self.constants.host_is_hackintosh is True:
        logging.info("- Skipping due to hackintosh")
        return
    if not self.constants.booted_oc_disk:
        logging.info("- Failed to find disk OpenCore launched from")
        return

    # Reduce the slice identifier to the whole-disk identifier (ex. disk1s1 -> disk1)
    root_disk = self.constants.booted_oc_disk.strip("disk")
    root_disk = "disk" + root_disk.split("s")[0]

    logging.info(f" - Boot Drive: {self.constants.booted_oc_disk} ({root_disk})")
    macOS_disk = utilities.get_disk_path()
    logging.info(f" - macOS Drive: {macOS_disk}")
    # APFS volumes can span multiple physical stores; compare against each
    physical_stores = utilities.find_apfs_physical_volume(macOS_disk)
    logging.info(f" - APFS Physical Stores: {physical_stores}")

    disk_match = False
    for disk in physical_stores:
        if root_disk in disk:
            logging.info(f"- Boot drive matches macOS drive ({disk})")
            disk_match = True
            break

    if disk_match is True:
        return

    # Check if OpenCore is on a USB drive
    logging.info("- Boot Drive does not match macOS drive, checking if OpenCore is on a USB drive")
    disk_info = plistlib.loads(subprocess.run(["/usr/sbin/diskutil", "info", "-plist", root_disk], stdout=subprocess.PIPE).stdout)
    try:
        if disk_info["Ejectable"] is False:
            logging.info("- Boot Disk is not removable, skipping prompt")
            return

        logging.info("- Boot Disk is ejectable, prompting user to install to internal")
        args = [
            "/usr/bin/osascript",
            "-e",
            f"""display dialog "OpenCore Legacy Patcher has detected that you are booting OpenCore from an USB or External drive.\n\nIf you would like to boot your Mac normally without a USB drive plugged in, you can install OpenCore to the internal hard drive.\n\nWould you like to launch OpenCore Legacy Patcher and install to disk?" """
            f'with icon POSIX file "{self.constants.app_icon_path}"',
        ]
        # osascript exits 0 when the user clicks the affirmative button
        output = subprocess.run(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
        if output.returncode == 0:
            logging.info("- Launching GUI's Build/Install menu")
            self.constants.start_build_install = True
            gui_entry.EntryPoint(self.constants).start(entry=gui_entry.SupportedEntryPoints.BUILD_OC)
    except KeyError:
        logging.info("- Unable to determine if boot disk is removable, skipping prompt")
def install_auto_patcher_launch_agent(self, kdk_caching_needed: bool = False):
    """
    Install the Auto Patcher Launch Agent

    Installs the following:
    - OpenCore-Patcher.app in /Library/Application Support/Dortania/
    - com.dortania.opencore-legacy-patcher.auto-patch.plist in /Library/LaunchAgents/

    See start_auto_patch() comments for more info

    Parameters:
        kdk_caching_needed (bool): Also install the OS-caching launch daemon
    """
    if self.constants.launcher_script is not None:
        logging.info("- Skipping Auto Patcher Launch Agent, not supported when running from source")
        return

    # Mapping of bundled plist -> installed destination
    services = {
        self.constants.auto_patch_launch_agent_path: "/Library/LaunchAgents/com.dortania.opencore-legacy-patcher.auto-patch.plist",
        self.constants.update_launch_daemon_path: "/Library/LaunchDaemons/com.dortania.opencore-legacy-patcher.macos-update.plist",
        **({ self.constants.rsr_monitor_launch_daemon_path: "/Library/LaunchDaemons/com.dortania.opencore-legacy-patcher.rsr-monitor.plist" } if self._create_rsr_monitor_daemon() else {}),
        **({ self.constants.kdk_launch_daemon_path: "/Library/LaunchDaemons/com.dortania.opencore-legacy-patcher.os-caching.plist" } if kdk_caching_needed is True else {}),
    }

    for service, destination in services.items():
        name = Path(service).name
        logging.info(f"- Installing {name}")
        if Path(destination).exists():
            # Fix: read via pathlib so file handles are closed promptly
            # (original left two open() handles to the garbage collector)
            if hashlib.sha256(Path(service).read_bytes()).hexdigest() == hashlib.sha256(Path(destination).read_bytes()).hexdigest():
                logging.info(f" - {name} checksums match, skipping")
                continue
            logging.info(" - Existing service found, removing")
            utilities.process_status(utilities.elevated(["/bin/rm", destination], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))

        # Create parent directories
        if not Path(destination).parent.exists():
            logging.info(f" - Creating {Path(destination).parent} directory")
            utilities.process_status(utilities.elevated(["/bin/mkdir", "-p", Path(destination).parent], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))

        utilities.process_status(utilities.elevated(["/bin/cp", service, destination], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))

        # Set the permissions on the service
        utilities.process_status(utilities.elevated(["chmod", "644", destination], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
        utilities.process_status(utilities.elevated(["chown", "root:wheel", destination], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))

    # Verify our binary isn't located in '/Library/Application Support/Dortania/'
    # As we'd simply be duplicating ourselves
    if self.constants.launcher_binary.startswith("/Library/Application Support/Dortania/"):
        logging.info("- Skipping Patcher Install, already installed")
        return

    logging.info("- Installing Auto Patcher Launch Agent")

    # Fix: original tested the relative path "Library/Application Support/Dortania"
    # (missing leading '/'), so the existence check depended on the current
    # working directory and could trigger a redundant mkdir
    if not Path("/Library/Application Support/Dortania").exists():
        logging.info("- Creating /Library/Application Support/Dortania/")
        utilities.process_status(utilities.elevated(["/bin/mkdir", "-p", "/Library/Application Support/Dortania"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))

    logging.info("- Copying OpenCore Patcher to /Library/Application Support/Dortania/")
    if Path("/Library/Application Support/Dortania/OpenCore-Patcher.app").exists():
        logging.info("- Deleting existing OpenCore-Patcher")
        utilities.process_status(utilities.elevated(["/bin/rm", "-R", "/Library/Application Support/Dortania/OpenCore-Patcher.app"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))

    # Strip everything after OpenCore-Patcher.app
    path = str(self.constants.launcher_binary).split("/Contents/MacOS/OpenCore-Patcher")[0]
    logging.info(f"- Copying {path} to /Library/Application Support/Dortania/")
    utilities.process_status(utilities.elevated(["/usr/bin/ditto", path, "/Library/Application Support/Dortania/OpenCore-Patcher.app"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))

    if not Path("/Library/Application Support/Dortania/OpenCore-Patcher.app").exists():
        # Sometimes the binary the user launches may have a suffix (ie. OpenCore-Patcher 3.app)
        # We'll want to rename it to OpenCore-Patcher.app
        path = path.split("/")[-1]
        logging.info(f"- Renaming {path} to OpenCore-Patcher.app")
        utilities.process_status(utilities.elevated(["/bin/mv", f"/Library/Application Support/Dortania/{path}", "/Library/Application Support/Dortania/OpenCore-Patcher.app"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))

    # Strip quarantine/extended attributes from the installed copy
    subprocess.run(["/usr/bin/xattr", "-cr", "/Library/Application Support/Dortania/OpenCore-Patcher.app"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Making app alias
    # Simply an easy way for users to notice the app
    # If there's already an alias or existing app, skip
    if not Path("/Applications/OpenCore-Patcher.app").exists():
        logging.info("- Making app alias")
        utilities.process_status(utilities.elevated(["/bin/ln", "-s", "/Library/Application Support/Dortania/OpenCore-Patcher.app", "/Applications/OpenCore-Patcher.app"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
def _create_rsr_monitor_daemon(self) -> bool:
    """
    Prepare the RSRMonitor launch daemon plist, if needed

    Scans /Library/Extensions for kexts declaring 'GPUCompanionBundles'
    and, when any exist alongside the Preboot cryptex OS.dmg, rewrites the
    bundled RSRMonitor plist to delete those kexts whenever the cryptex
    changes (i.e. after a Rapid Security Response is applied).

    Returns:
        bool: True when the daemon plist was prepared (should be installed),
              False when RSRMonitor is not needed
    """
    # Get kext list in /Library/Extensions that have the 'GPUCompanionBundles' property
    # This is used to determine if we need to run the RSRMonitor
    logging.info("- Checking if RSRMonitor is needed")

    cryptex_path = f"/System/Volumes/Preboot/{utilities.get_preboot_uuid()}/cryptex1/current/OS.dmg"
    if not Path(cryptex_path).exists():
        logging.info("- No OS.dmg, skipping RSRMonitor")
        return False

    kexts = []
    for kext in Path("/Library/Extensions").glob("*.kext"):
        info_plist_path = Path(f"{kext}/Contents/Info.plist")
        if not info_plist_path.exists():
            continue
        try:
            # Fix: use a context manager so the file handle is closed promptly
            with info_plist_path.open("rb") as plist_file:
                kext_plist = plistlib.load(plist_file)
        except Exception as e:
            logging.info(f" - Failed to load plist for {kext.name}: {e}")
            continue
        if "GPUCompanionBundles" not in kext_plist:
            continue
        logging.info(f" - Found kext with GPUCompanionBundles: {kext.name}")
        kexts.append(kext.name)

    # If we have no kexts, we don't need to run the RSRMonitor
    if not kexts:
        logging.info("- No kexts found with GPUCompanionBundles, skipping RSRMonitor")
        return False

    # Load the RSRMonitor plist
    daemon_path = Path(self.constants.rsr_monitor_launch_daemon_path)
    with daemon_path.open("rb") as plist_file:
        rsr_monitor_plist = plistlib.load(plist_file)

    arguments = ["/bin/rm", "-Rfv"]
    arguments += [f"/Library/Extensions/{kext}" for kext in kexts]

    # Add the arguments to the RSRMonitor plist
    rsr_monitor_plist["ProgramArguments"] = arguments

    # Next add monitoring for '/System/Volumes/Preboot/{UUID}/cryptex1/OS.dmg'
    logging.info(f" - Adding monitor: {cryptex_path}")
    rsr_monitor_plist["WatchPaths"] = [
        cryptex_path,
    ]

    # Write the RSRMonitor plist
    with daemon_path.open("wb") as plist_file:
        plistlib.dump(rsr_monitor_plist, plist_file)

    return True

View File

@@ -0,0 +1,822 @@
"""
sys_patch_detect.py: Hardware Detection Logic for Root Patching
"""
import logging
import plistlib
import py_sip_xnu
import packaging.version
from pathlib import Path
from .. import constants
from ..detections import (
amfi_detect,
device_probe
)
from ..utilities import (
kdk_handler,
network_handler,
utilities
)
from ..datasets import (
cpu_data,
model_array,
os_data,
sip_data,
smbios_data
)
class DetectRootPatch:
    """
    Library for querying root volume patches applicable for booted system
    """

    def __init__(self, model: str, global_constants: constants.Constants,
                 os_major: int = None, os_minor: int = None,
                 os_build: str = None, os_version: str = None
    ) -> None:
        """
        Parameters:
            model            (str): Mac model identifier to evaluate
            global_constants (constants.Constants): Shared patcher state
            os_major         (int, optional): Darwin major version; defaults to the detected OS
            os_minor         (int, optional): Darwin minor version; defaults to detected
            os_build         (str, optional): OS build string; defaults to detected
            os_version       (str, optional): OS version string; defaults to detected
        """
        self.model: str = model
        self.constants: constants.Constants = global_constants

        # Fall back to the host's detected OS information when not overridden
        if os_major is None:
            os_major = self.constants.detected_os
        if os_minor is None:
            os_minor = self.constants.detected_os_minor
        if os_build is None:
            os_build = self.constants.detected_os_build
        if os_version is None:
            os_version = self.constants.detected_os_version

        self.os_major: int = os_major
        self.os_minor: int = os_minor
        self.os_build: str = os_build
        self.os_version: str = os_version

        self.computer = self.constants.computer

        # GPU Patch Detection
        # Flags below are set by the detection methods; all default to False
        self.nvidia_tesla = False
        self.kepler_gpu = False
        self.nvidia_web = False
        self.amd_ts1 = False
        self.amd_ts2 = False
        self.iron_gpu = False
        self.sandy_gpu = False
        self.ivy_gpu = False
        self.haswell_gpu = False
        self.broadwell_gpu = False
        self.skylake_gpu = False
        self.legacy_gcn = False
        self.legacy_gcn_v2 = False
        self.legacy_polaris = False
        self.legacy_vega = False

        # Misc Patch Detection
        self.brightness_legacy = False
        self.legacy_audio = False
        self.legacy_wifi = False
        self.modern_wifi = False
        self.legacy_gmux = False
        self.legacy_keyboard_backlight = False
        self.legacy_uhci_ohci = False
        self.legacy_pcie_webcam = False
        self.legacy_t1_chip = False

        # Patch Requirements
        self.amfi_must_disable = False
        self.amfi_shim_bins = False
        self.supports_metal = False
        self.needs_nv_web_checks = False
        self.requires_root_kc = False

        # Validation Checks
        self.sip_enabled = False
        self.sbm_enabled = False
        self.amfi_enabled = False
        self.fv_enabled = False
        self.dosdude_patched = False
        self.missing_kdk = False
        self.has_network = False
        self.unsupported_os = False
        self.missing_whatever_green = False
        self.missing_nv_web_nvram = False
        self.missing_nv_web_opengl = False
        self.missing_nv_compat = False
    def _detect_gpus(self):
        """
        Query GPUs and set flags for applicable patches

        Iterates over every probed GPU and, based on its architecture and the
        target OS, raises the matching patch flag along with its prerequisites
        (AMFI exemption, root KernelCollection rebuild, etc.). Afterwards,
        conflicting patch sets are reconciled (Metal vs non-Metal, GCN vs
        Polaris/Vega) and the KDK requirement is resolved.
        """
        gpus = self.constants.computer.gpus
        # Catalina is the last OS with native support for these non-Metal GPUs
        non_metal_os = os_data.os_data.catalina
        for i, gpu in enumerate(gpus):
            # A class code of 0xFFFFFFFF marks a GPU disabled by firmware; skip it
            if gpu.class_code and gpu.class_code != 0xFFFFFFFF:
                logging.info(f"Found GPU ({i}): {utilities.friendly_hex(gpu.vendor_id)}:{utilities.friendly_hex(gpu.device_id)}")
                if gpu.arch in [device_probe.NVIDIA.Archs.Tesla] and self.constants.force_nv_web is False:
                    if self.os_major > non_metal_os:
                        self.nvidia_tesla = True
                        self.amfi_must_disable = True
                        if os_data.os_data.ventura in self.constants.legacy_accel_support:
                            self.amfi_shim_bins = True
                        self.legacy_keyboard_backlight = self._check_legacy_keyboard_backlight()
                        self.requires_root_kc = True
                elif gpu.arch == device_probe.NVIDIA.Archs.Kepler and self.constants.force_nv_web is False:
                    if self.os_major > os_data.os_data.big_sur:
                        # Kepler drivers were dropped with Beta 7
                        # 12.0 Beta 5: 21.0.0 - 21A5304g
                        # 12.0 Beta 6: 21.1.0 - 21A5506j
                        # 12.0 Beta 7: 21.1.0 - 21A5522h
                        if (
                            self.os_major >= os_data.os_data.ventura or
                            (
                                "21A5506j" not in self.os_build and
                                self.os_major == os_data.os_data.monterey and
                                self.os_minor > 0
                            )
                        ):
                            self.kepler_gpu = True
                            self.supports_metal = True
                            if self.os_major >= os_data.os_data.ventura:
                                self.amfi_must_disable = True
                                # 13.3 (Darwin 22.4) and newer additionally ship shimmed binaries
                                if (self.os_major == os_data.os_data.ventura and self.os_minor >= 4) or self.os_major > os_data.os_data.ventura:
                                    self.amfi_shim_bins = True
                elif gpu.arch in [
                    device_probe.NVIDIA.Archs.Fermi,
                    device_probe.NVIDIA.Archs.Kepler,
                    device_probe.NVIDIA.Archs.Maxwell,
                    device_probe.NVIDIA.Archs.Pascal,
                ]:
                    # Note: Kepler only reaches this branch when force_nv_web is set
                    if self.os_major > os_data.os_data.mojave:
                        self.nvidia_web = True
                        self.amfi_must_disable = True
                        if os_data.os_data.ventura in self.constants.legacy_accel_support:
                            self.amfi_shim_bins = True
                        self.needs_nv_web_checks = True
                        self.requires_root_kc = True
                elif gpu.arch == device_probe.AMD.Archs.TeraScale_1:
                    if self.os_major > non_metal_os:
                        self.amd_ts1 = True
                        self.amfi_must_disable = True
                        if os_data.os_data.ventura in self.constants.legacy_accel_support:
                            self.amfi_shim_bins = True
                        self.requires_root_kc = True
                elif gpu.arch == device_probe.AMD.Archs.TeraScale_2:
                    if self.os_major > non_metal_os:
                        self.amd_ts2 = True
                        self.amfi_must_disable = True
                        if os_data.os_data.ventura in self.constants.legacy_accel_support:
                            self.amfi_shim_bins = True
                        self.requires_root_kc = True
                elif gpu.arch in [
                    device_probe.AMD.Archs.Legacy_GCN_7000,
                    device_probe.AMD.Archs.Legacy_GCN_8000,
                    device_probe.AMD.Archs.Legacy_GCN_9000,
                    device_probe.AMD.Archs.Polaris,
                ]:
                    if self.os_major > os_data.os_data.monterey:
                        # Under Rosetta the AVX requirement does not apply; skip patching
                        if self.constants.computer.rosetta_active is True:
                            continue
                        if gpu.arch == device_probe.AMD.Archs.Polaris:
                            # Check if host supports AVX2.0
                            # If not, enable legacy GCN patch
                            # MacBookPro13,3 does include an unsupported framebuffer, thus we'll patch to ensure
                            # full compatibility (namely power states, etc)
                            # Reference: https://github.com/dortania/bugtracker/issues/292
                            # TODO: Probe framebuffer families further
                            # Sonoma note: MacBookPro14,3 has the same issue...
                            # iMac18,2/3 is partially affected, however currently it seems the generic framebuffer
                            # is sufficient. Only MacBookPro14,3 needs this for dGPU handling
                            if self.model not in ["MacBookPro13,3", "MacBookPro14,3"]:
                                if "AVX2" in self.constants.computer.cpu.leafs:
                                    continue
                                self.legacy_polaris = True
                            else:
                                if self.model == "MacBookPro13,3":
                                    self.legacy_gcn = True
                                elif self.model == "MacBookPro14,3":
                                    if self.os_major < os_data.os_data.sonoma:
                                        continue
                                    self.legacy_gcn_v2 = True
                        else:
                            self.legacy_gcn = True
                        self.supports_metal = True
                        self.requires_root_kc = True
                        self.amfi_must_disable = True
                elif gpu.arch == device_probe.AMD.Archs.Vega:
                    if self.os_major > os_data.os_data.monterey:
                        # Vega runs on the native stack when the CPU has AVX2; no patch needed
                        if "AVX2" in self.constants.computer.cpu.leafs:
                            continue
                        self.legacy_vega = True
                        self.supports_metal = True
                        self.requires_root_kc = True
                        self.amfi_must_disable = True
                elif gpu.arch == device_probe.Intel.Archs.Iron_Lake:
                    if self.os_major > non_metal_os:
                        self.iron_gpu = True
                        self.amfi_must_disable = True
                        if os_data.os_data.ventura in self.constants.legacy_accel_support:
                            self.amfi_shim_bins = True
                        self.legacy_keyboard_backlight = self._check_legacy_keyboard_backlight()
                        self.requires_root_kc = True
                elif gpu.arch == device_probe.Intel.Archs.Sandy_Bridge:
                    if self.os_major > non_metal_os:
                        self.sandy_gpu = True
                        self.amfi_must_disable = True
                        if os_data.os_data.ventura in self.constants.legacy_accel_support:
                            self.amfi_shim_bins = True
                        self.legacy_keyboard_backlight = self._check_legacy_keyboard_backlight()
                        self.requires_root_kc = True
                elif gpu.arch == device_probe.Intel.Archs.Ivy_Bridge:
                    if self.os_major > os_data.os_data.big_sur:
                        self.ivy_gpu = True
                        if self.os_major >= os_data.os_data.ventura:
                            self.amfi_must_disable = True
                            if (self.os_major == os_data.os_data.ventura and self.os_minor >= 4) or self.os_major > os_data.os_data.ventura:
                                self.amfi_shim_bins = True
                        self.supports_metal = True
                elif gpu.arch == device_probe.Intel.Archs.Haswell:
                    if self.os_major > os_data.os_data.monterey:
                        self.haswell_gpu = True
                        self.amfi_must_disable = True
                        if (self.os_major == os_data.os_data.ventura and self.os_minor >= 4) or self.os_major > os_data.os_data.ventura:
                            self.amfi_shim_bins = True
                        self.supports_metal = True
                elif gpu.arch == device_probe.Intel.Archs.Broadwell:
                    if self.os_major > os_data.os_data.monterey:
                        self.broadwell_gpu = True
                        self.amfi_must_disable = True
                        self.supports_metal = True
                elif gpu.arch == device_probe.Intel.Archs.Skylake:
                    if self.os_major > os_data.os_data.monterey:
                        self.skylake_gpu = True
                        self.amfi_must_disable = True
                        self.supports_metal = True
        if self.supports_metal is True:
            # Avoid patching Metal and non-Metal GPUs if both present, prioritize Metal GPU
            # Main concerns are for iMac12,x with Sandy iGPU and Kepler dGPU
            self.nvidia_tesla = False
            self.nvidia_web = False
            self.amd_ts1 = False
            self.amd_ts2 = False
            self.iron_gpu = False
            self.sandy_gpu = False
            self.legacy_keyboard_backlight = False
        if self.legacy_gcn is True or self.legacy_gcn_v2 is True:
            # We can only support one or the other due to the nature of relying
            # on portions of the native AMD stack for Polaris and Vega
            # Thus we'll prioritize legacy GCN due to being the internal card
            # ex. MacPro6,1 and MacBookPro11,5 with eGPUs
            self.legacy_polaris = False
            self.legacy_vega = False
        if self.os_major <= os_data.os_data.monterey:
            # Always assume Root KC requirement on Monterey and older
            self.requires_root_kc = True
        else:
            if self.requires_root_kc is True:
                self.missing_kdk = not self._check_kdk()
def _check_networking_support(self):
"""
Query for network requirement, ex. KDK downloading
On macOS Ventura, networking support is required to download KDKs.
However for machines such as BCM94322, BCM94328 and Atheros chipsets,
users may only have wifi as their only supported network interface.
Thus we'll allow for KDK-less installs for these machines on first run.
On subsequent runs, we'll require networking to be enabled.
"""
# Increase OS check if modern wifi is detected
if self.os_major < (os_data.os_data.ventura if self.legacy_wifi is True else os_data.os_data.sonoma):
return
if self.legacy_wifi is False and self.modern_wifi is False:
return
if self.requires_root_kc is False:
return
if self.missing_kdk is False:
return
if self.has_network is True:
return
# Verify whether OCLP already installed network patches to the root volume
# If so, require networking to be enabled (user just needs to connect to wifi)
oclp_patch_path = "/System/Library/CoreServices/OpenCore-Legacy-Patcher.plist"
if Path(oclp_patch_path).exists():
oclp_plist = plistlib.load(open(oclp_patch_path, "rb"))
if "Legacy Wireless" in oclp_plist or "Modern Wireless" in oclp_plist:
return
# Due to the reliance of KDKs for most older patches, we'll allow KDK-less
# installs for Legacy Wifi patches and remove others
self.missing_kdk = False
self.requires_root_kc = False
# Reset patches needing KDK
self.nvidia_tesla = False
self.nvidia_web = False
self.amd_ts1 = False
self.amd_ts2 = False
self.iron_gpu = False
self.sandy_gpu = False
self.legacy_gcn = False
self.legacy_gcn_v2 = False
self.legacy_polaris = False
self.legacy_vega = False
self.brightness_legacy = False
self.legacy_audio = False
self.legacy_gmux = False
self.legacy_keyboard_backlight = False
# Currently all graphics patches require a KDK
if self.os_major >= os_data.os_data.sonoma:
self.kepler_gpu = False
self.ivy_gpu = False
self.haswell_gpu = False
self.broadwell_gpu = False
self.skylake_gpu = False
def _check_dgpu_status(self):
"""
Query whether system has an active dGPU
"""
dgpu = self.constants.computer.dgpu
if dgpu:
if dgpu.class_code and dgpu.class_code == 0xFFFFFFFF:
# If dGPU is disabled via class-codes, assume demuxed
return False
return True
return False
def _detect_demux(self):
"""
Query whether system has been demuxed (ex. MacBookPro8,2, disabled dGPU)
"""
# If GFX0 is missing, assume machine was demuxed
# -wegnoegpu would also trigger this, so ensure arg is not present
if not "-wegnoegpu" in (utilities.get_nvram("boot-args", decode=True) or ""):
igpu = self.constants.computer.igpu
dgpu = self._check_dgpu_status()
if igpu and not dgpu:
return True
return False
def _check_legacy_keyboard_backlight(self):
"""
Query whether system has a legacy keyboard backlight
Returns:
bool: True if legacy keyboard backlight, False otherwise
"""
# iMac12,x+ have an 'ACPI0008' device, but it's not a keyboard backlight
# Best to assume laptops will have a keyboard backlight
if self.model.startswith("MacBook"):
return self.constants.computer.ambient_light_sensor
return False
def _check_nv_web_nvram(self):
"""
Query for Nvidia Web Driver property: nvda_drv_vrl or nvda_drv
Returns:
bool: True if property is present, False otherwise
"""
nv_on = utilities.get_nvram("boot-args", decode=True)
if nv_on:
if "nvda_drv_vrl=" in nv_on:
return True
nv_on = utilities.get_nvram("nvda_drv")
if nv_on:
return True
return False
def _check_nv_web_opengl(self):
"""
Query for Nvidia Web Driver property: ngfxgl
Verify Web Drivers will run in OpenGL mode
Returns:
bool: True if property is present, False otherwise
"""
nv_on = utilities.get_nvram("boot-args", decode=True)
if nv_on:
if "ngfxgl=" in nv_on:
return True
for gpu in self.constants.computer.gpus:
if isinstance(gpu, device_probe.NVIDIA):
if gpu.disable_metal is True:
return True
return False
def _check_nv_compat(self):
"""
Query for Nvidia Web Driver property: ngfxcompat
Verify Web Drivers will skip NVDAStartupWeb compatibility check
Returns:
bool: True if property is present, False otherwise
"""
nv_on = utilities.get_nvram("boot-args", decode=True)
if nv_on:
if "ngfxcompat=" in nv_on:
return True
for gpu in self.constants.computer.gpus:
if isinstance(gpu, device_probe.NVIDIA):
if gpu.force_compatible is True:
return True
return False
def _check_whatevergreen(self):
"""
Query whether WhateverGreen.kext is loaded
Returns:
bool: True if loaded, False otherwise
"""
return utilities.check_kext_loaded("as.vit9696.WhateverGreen")
def _check_os_compat(self) -> bool:
"""
Base check to ensure patcher is compatible with host OS
"""
min_os = os_data.os_data.big_sur
max_os = os_data.os_data.sonoma
if self.os_major < min_os or self.os_major > max_os:
return False
return True
def _check_kdk(self):
"""
Query whether Kernel Debug Kit is installed
Returns:
bool: True if installed, False otherwise
"""
return kdk_handler.KernelDebugKitObject(self.constants, self.os_build, self.os_version, passive=True).kdk_already_installed
def _check_sip(self):
"""
Query System Integrity checks required for patching
Returns:
tuple: (list, str, str) of SIP values, SIP hex, SIP error message
"""
if self.os_major > os_data.os_data.catalina:
if self.nvidia_web is True:
sip = sip_data.system_integrity_protection.root_patch_sip_big_sur_3rd_part_kexts
sip_hex = "0xA03"
sip_value = (
f"For Hackintoshes, please set csr-active-config to '030A0000' ({sip_hex})\nFor non-OpenCore Macs, please run 'csrutil disable' and \n'csrutil authenticated-root disable' in RecoveryOS"
)
elif self.os_major >= os_data.os_data.ventura:
sip = sip_data.system_integrity_protection.root_patch_sip_ventura
sip_hex = "0x803"
sip_value = (
f"For Hackintoshes, please set csr-active-config to '03080000' ({sip_hex})\nFor non-OpenCore Macs, please run 'csrutil disable' and \n'csrutil authenticated-root disable' in RecoveryOS"
)
else:
sip = sip_data.system_integrity_protection.root_patch_sip_big_sur
sip_hex = "0x802"
sip_value = (
f"For Hackintoshes, please set csr-active-config to '02080000' ({sip_hex})\nFor non-OpenCore Macs, please run 'csrutil disable' and \n'csrutil authenticated-root disable' in RecoveryOS"
)
else:
sip = sip_data.system_integrity_protection.root_patch_sip_mojave
sip_hex = "0x603"
sip_value = f"For Hackintoshes, please set csr-active-config to '03060000' ({sip_hex})\nFor non-OpenCore Macs, please run 'csrutil disable' in RecoveryOS"
return (sip, sip_value, sip_hex)
def _check_uhci_ohci(self):
"""
Query whether host has UHCI/OHCI controllers, and requires USB 1.1 patches
Returns:
bool: True if UHCI/OHCI patches required, False otherwise
"""
if self.os_major < os_data.os_data.ventura:
return False
# If we're on a hackintosh, check for UHCI/OHCI controllers
if self.constants.host_is_hackintosh is True:
for controller in self.constants.computer.usb_controllers:
if (
isinstance(controller, device_probe.UHCIController) or
isinstance(controller, device_probe.OHCIController)
):
return True
return False
if self.model not in smbios_data.smbios_dictionary:
return False
# If we're on a Mac, check for Penryn or older
# This is due to Apple implementing an internal USB hub on post-Penryn (excluding MacPro4,1, MacPro5,1 and Xserve3,1)
# Ref: https://techcommunity.microsoft.com/t5/microsoft-usb-blog/reasons-to-avoid-companion-controllers/ba-p/270710
if (
smbios_data.smbios_dictionary[self.model]["CPU Generation"] <= cpu_data.CPUGen.penryn.value or \
self.model in ["MacPro4,1", "MacPro5,1", "Xserve3,1"]
):
return True
return False
    # Entry point for patch set detection
    def detect_patch_set(self):
        """
        Query patch sets required for host

        Probes networking, T1/webcam hardware, USB 1.1 controllers, display
        brightness, audio, wifi, GMUX and GPUs, raising the matching patch
        flags, then condenses everything into a display-ready dictionary
        (also cached as self.root_patch_dict).

        Returns:
            dict: Dictionary of patch sets and validation results
        """
        self.has_network = network_handler.NetworkUtilities().verify_network_connection()
        if self.os_major >= os_data.os_data.sonoma:
            self.legacy_pcie_webcam = self.constants.computer.pcie_webcam
            self.legacy_t1_chip = self.constants.computer.t1_chip
            if self.legacy_t1_chip is True:
                self.amfi_must_disable = True
        if self._check_uhci_ohci() is True:
            self.legacy_uhci_ohci = True
            self.requires_root_kc = True
        if self.model in model_array.LegacyBrightness:
            if self.os_major > os_data.os_data.catalina:
                self.brightness_legacy = True
        if self.model in ["iMac7,1", "iMac8,1"] or (self.model in model_array.LegacyAudio and utilities.check_kext_loaded("as.vit9696.AppleALC") is False):
            # Special hack for systems with botched GOPs
            # TL;DR: No Boot Screen breaks Lilu, therefore breaking audio
            if self.os_major > os_data.os_data.catalina:
                self.legacy_audio = True
        # Legacy wireless: BCM943224/94331 and Atheros 40 chipsets
        if (
            isinstance(self.constants.computer.wifi, device_probe.Broadcom)
            and self.constants.computer.wifi.chipset in [device_probe.Broadcom.Chipsets.AirPortBrcm4331, device_probe.Broadcom.Chipsets.AirPortBrcm43224]
        ) or (isinstance(self.constants.computer.wifi, device_probe.Atheros) and self.constants.computer.wifi.chipset == device_probe.Atheros.Chipsets.AirPortAtheros40):
            if self.os_major > os_data.os_data.big_sur:
                self.legacy_wifi = True
                if self.os_major >= os_data.os_data.ventura:
                    # Due to extracted frameworks for IO80211.framework and co, check library validation
                    self.amfi_must_disable = True
                    if self.os_major > os_data.os_data.ventura:
                        self.amfi_shim_bins = True
        # Modern wireless: BCM94360-era chipsets, dropped in Sonoma
        if (
            isinstance(self.constants.computer.wifi, device_probe.Broadcom)
            and self.constants.computer.wifi.chipset in [
                device_probe.Broadcom.Chipsets.AirPortBrcm4360,
                device_probe.Broadcom.Chipsets.AirportBrcmNIC,
                # We don't officially support this chipset, however we'll throw a bone to hackintosh users
                device_probe.Broadcom.Chipsets.AirPortBrcmNICThirdParty,
            ]):
            if self.os_major > os_data.os_data.ventura:
                self.modern_wifi = True
                self.amfi_shim_bins = True
        # if self.model in ["MacBookPro5,1", "MacBookPro5,2", "MacBookPro5,3", "MacBookPro8,2", "MacBookPro8,3"]:
        if self.model in ["MacBookPro8,2", "MacBookPro8,3"]:
            # Sierra uses a legacy GMUX control method needed for dGPU switching on MacBookPro5,x
            # Same method is also used for demuxed machines
            # Note that MacBookPro5,x machines are extremely unstable with this patch set, so disabled until investigated further
            # Ref: https://github.com/dortania/OpenCore-Legacy-Patcher/files/7360909/KP-b10-030.txt
            if self.os_major > os_data.os_data.high_sierra:
                if self.model in ["MacBookPro8,2", "MacBookPro8,3"]:
                    # Ref: https://doslabelectronics.com/Demux.html
                    if self._detect_demux() is True:
                        self.legacy_gmux = True
                else:
                    self.legacy_gmux = True
        self._detect_gpus()
        # This must be performed last, as it may override previous decisions
        # Namely, whether we allow patches requiring KDKs
        self._check_networking_support()
        # Human-readable summary: keys are display labels, values the detected flags
        self.root_patch_dict = {
            "Graphics: Nvidia Tesla": self.nvidia_tesla,
            "Graphics: Nvidia Kepler": self.kepler_gpu,
            "Graphics: Nvidia Web Drivers": self.nvidia_web,
            "Graphics: AMD TeraScale 1": self.amd_ts1,
            "Graphics: AMD TeraScale 2": self.amd_ts2,
            "Graphics: AMD Legacy GCN": self.legacy_gcn,
            "Graphics: AMD Legacy GCN (2017)": self.legacy_gcn_v2,
            "Graphics: AMD Legacy Polaris": self.legacy_polaris,
            "Graphics: AMD Legacy Vega": self.legacy_vega,
            "Graphics: Intel Ironlake": self.iron_gpu,
            "Graphics: Intel Sandy Bridge": self.sandy_gpu,
            "Graphics: Intel Ivy Bridge": self.ivy_gpu,
            "Graphics: Intel Haswell": self.haswell_gpu,
            "Graphics: Intel Broadwell": self.broadwell_gpu,
            "Graphics: Intel Skylake": self.skylake_gpu,
            "Brightness: Legacy Backlight Control": self.brightness_legacy,
            "Audio: Legacy Realtek": self.legacy_audio,
            "Networking: Legacy Wireless": self.legacy_wifi,
            "Networking: Modern Wireless": self.modern_wifi,
            "Miscellaneous: Legacy GMUX": self.legacy_gmux,
            "Miscellaneous: Legacy Keyboard Backlight": self.legacy_keyboard_backlight,
            "Miscellaneous: Legacy USB 1.1": self.legacy_uhci_ohci,
            "Miscellaneous: PCIe FaceTime Camera": self.legacy_pcie_webcam,
            "Miscellaneous: T1 Security Chip": self.legacy_t1_chip,
            "Settings: Requires AMFI exemption": self.amfi_must_disable,
            "Settings: Supports Auxiliary Cache": not self.requires_root_kc,
            "Settings: Kernel Debug Kit missing": self.missing_kdk if self.os_major >= os_data.os_data.ventura.value else False,
            "Validation: Patching Possible": self.verify_patch_allowed(),
            "Validation: Unpatching Possible": self._verify_unpatch_allowed(),
            f"Validation: Unsupported Host OS": self.unsupported_os,
            f"Validation: SIP is enabled (Required: {self._check_sip()[2]} or higher)": self.sip_enabled,
            f"Validation: Currently Booted SIP: ({hex(py_sip_xnu.SipXnu().get_sip_status().value)})": self.sip_enabled,
            "Validation: SecureBootModel is enabled": self.sbm_enabled,
            f"Validation: {'AMFI' if self.constants.host_is_hackintosh is True or self._get_amfi_level_needed() > 2 else 'Library Validation'} is enabled": self.amfi_enabled if self.amfi_must_disable is True else False,
            "Validation: FileVault is enabled": self.fv_enabled,
            "Validation: System is dosdude1 patched": self.dosdude_patched,
            "Validation: WhateverGreen.kext missing": self.missing_whatever_green if self.nvidia_web is True else False,
            "Validation: Force OpenGL property missing": self.missing_nv_web_opengl if self.nvidia_web is True else False,
            "Validation: Force compat property missing": self.missing_nv_compat if self.nvidia_web is True else False,
            "Validation: nvda_drv(_vrl) variable missing": self.missing_nv_web_nvram if self.nvidia_web is True else False,
            "Validation: Network Connection Required": (not self.has_network) if (self.requires_root_kc and self.missing_kdk and self.os_major >= os_data.os_data.ventura.value) else False,
        }
        return self.root_patch_dict
def _get_amfi_level_needed(self):
"""
Query the AMFI level needed for the patcher to work
Returns:
int: AMFI level needed
"""
if self.amfi_must_disable is False:
return amfi_detect.AmfiConfigDetectLevel.NO_CHECK
if self.os_major < os_data.os_data.big_sur:
return amfi_detect.AmfiConfigDetectLevel.NO_CHECK
amfipass_version = utilities.check_kext_loaded("com.dhinakg.AMFIPass")
if amfipass_version:
if packaging.version.parse(amfipass_version) >= packaging.version.parse(self.constants.amfipass_compatibility_version):
# If AMFIPass is loaded, our binaries will work
return amfi_detect.AmfiConfigDetectLevel.NO_CHECK
if self.os_major >= os_data.os_data.ventura:
if self.amfi_shim_bins is True:
# Currently we require AMFI outright disabled
# in Ventura to work with shim'd binaries
return amfi_detect.AmfiConfigDetectLevel.ALLOW_ALL
return amfi_detect.AmfiConfigDetectLevel.LIBRARY_VALIDATION
    def verify_patch_allowed(self, print_errors: bool = False):
        """
        Validate that the patcher can be run

        Runs every validation check (SIP, Secure Boot, FileVault, AMFI,
        dosdude1 detection, OS support, Web Driver prerequisites, network)
        and caches the results on self for detect_patch_set() to report.

        Parameters:
            print_errors (bool): Print errors to console
        Returns:
            bool: True if patching is allowed, False otherwise
        """
        sip_dict = self._check_sip()
        sip = sip_dict[0]
        sip_value = sip_dict[1]
        self.sip_enabled, self.sbm_enabled, self.fv_enabled, self.dosdude_patched = utilities.patching_status(sip, self.os_major)
        self.amfi_enabled = not amfi_detect.AmfiConfigurationDetection().check_config(self._get_amfi_level_needed())
        self.unsupported_os = not self._check_os_compat()
        if self.nvidia_web is True:
            # Web Drivers additionally require WhateverGreen plus NVRAM/boot-arg properties
            self.missing_nv_web_nvram = not self._check_nv_web_nvram()
            self.missing_nv_web_opengl = not self._check_nv_web_opengl()
            self.missing_nv_compat = not self._check_nv_compat()
            self.missing_whatever_green = not self._check_whatevergreen()
        if print_errors is True:
            if self.sip_enabled is True:
                logging.info("\nCannot patch! Please disable System Integrity Protection (SIP).")
                logging.info("Disable SIP in Patcher Settings and Rebuild OpenCore\n")
                logging.info("Ensure the following bits are set for csr-active-config:")
                logging.info("\n".join(sip))
                logging.info(sip_value)
            if self.sbm_enabled is True:
                logging.info("\nCannot patch! Please disable Apple Secure Boot.")
                logging.info("Disable SecureBootModel in Patcher Settings and Rebuild OpenCore")
                logging.info("For Hackintoshes, set SecureBootModel to Disabled")
            if self.fv_enabled is True:
                logging.info("\nCannot patch! Please disable FileVault.")
                logging.info("For OCLP Macs, please rebuild your config with 0.2.5 or newer")
                logging.info("For others, Go to System Preferences -> Security and disable FileVault")
            if self.amfi_enabled is True and self.amfi_must_disable is True:
                logging.info("\nCannot patch! Please disable AMFI.")
                logging.info("For Hackintoshes, please add amfi_get_out_of_my_way=1 to boot-args")
            if self.dosdude_patched is True:
                logging.info("\nCannot patch! Detected machine has already been patched by another patcher")
                logging.info("Please ensure your install is either clean or patched with OpenCore Legacy Patcher")
            if self.nvidia_web is True:
                if self.missing_nv_web_opengl is True:
                    logging.info("\nCannot patch! Force OpenGL property missing")
                    logging.info("Please ensure ngfxgl=1 is set in boot-args")
                if self.missing_nv_compat is True:
                    logging.info("\nCannot patch! Force Nvidia compatibility property missing")
                    logging.info("Please ensure ngfxcompat=1 is set in boot-args")
                if self.missing_nv_web_nvram is True:
                    logging.info("\nCannot patch! nvda_drv(_vrl) variable missing")
                    logging.info("Please ensure nvda_drv_vrl=1 is set in boot-args")
                if self.missing_whatever_green is True:
                    logging.info("\nCannot patch! WhateverGreen.kext missing")
                    logging.info("Please ensure WhateverGreen.kext is installed")
            # Network only blocks when a KDK is needed, missing and undownloadable
            if (not self.has_network) if (self.requires_root_kc and self.missing_kdk and self.os_major >= os_data.os_data.ventura.value) else False:
                logging.info("\nCannot patch! Network Connection Required")
                logging.info("Please ensure you have an active internet connection")
            if self.unsupported_os is True:
                logging.info("\nCannot patch! Unsupported Host OS")
                logging.info("Please ensure you are running a patcher-supported OS")
        if any(
            [
                # General patch checks
                self.sip_enabled, self.sbm_enabled, self.fv_enabled, self.dosdude_patched, self.unsupported_os,
                # non-Metal specific
                self.amfi_enabled if self.amfi_must_disable is True else False,
                # Web Driver specific
                self.missing_nv_web_nvram if self.nvidia_web is True else False,
                self.missing_nv_web_opengl if self.nvidia_web is True else False,
                self.missing_nv_compat if self.nvidia_web is True else False,
                self.missing_whatever_green if self.nvidia_web is True else False,
                # KDK specific
                (not self.has_network) if (self.requires_root_kc and self.missing_kdk and self.os_major >= os_data.os_data.ventura.value) else False
            ]
        ):
            return False
        return True
def _verify_unpatch_allowed(self):
"""
Validate that the unpatcher can be run
Preconditions:
Must be called after verify_patch_allowed()
Returns:
bool: True if unpatching is allowed, False otherwise
"""
return not self.sip_enabled

View File

@@ -0,0 +1,212 @@
"""
sys_patch_generate.py: Class for generating patch sets for the current host
"""
import logging
from .. import constants
from ..datasets import sys_patch_dict
from ..utilities import utilities
from ..detections import device_probe
class GenerateRootPatchSets:
"""
Library for generating patch sets for the current host
Parameters:
model (str): Model identifier
global_constants (constants.Constants): Global constants object
hardware_details (dict): Dictionary of hardware details generated by detect_patch_set()
Usage:
>>> from resources.sys_patch import sys_patch_generate
>>> patchset = sys_patch_generate.GenerateRootPatches("iMac7,1", self.constants, self.hardware_details).patchset
"""
    def __init__(self, model: str, global_constants: constants.Constants, hardware_details: dict) -> None:
        """
        Resolve the applicable patch set immediately; callers read `.patchset`.

        Parameters:
            model (str): Model identifier
            global_constants (constants.Constants): Global constants object
            hardware_details (dict): Dictionary generated by DetectRootPatch.detect_patch_set()
        """
        self.model: str = model
        self.constants: constants.Constants = global_constants
        self.hardware_details: dict = hardware_details
        # Eagerly computed so the object is ready to use right after construction
        self.patchset: dict = self._generate_patchset()
def _generate_patchset(self) -> dict:
"""
Generate Patchset dictionary for the current system
Returns:
dict: Dictionary of patches to be applied from sys_patch_dict.py
"""
all_hardware_patchset: dict = sys_patch_dict.SystemPatchDictionary(self.constants.detected_os, self.constants.detected_os_minor, self.constants.legacy_accel_support, self.constants.detected_os_version).patchset_dict
required_patches: dict = {}
utilities.cls()
logging.info("The following patches will be applied:")
if self.hardware_details["Graphics: Intel Ironlake"] is True:
required_patches.update({"Non-Metal Common": all_hardware_patchset["Graphics"]["Non-Metal Common"]})
required_patches.update({"WebKit Monterey Common": all_hardware_patchset["Graphics"]["WebKit Monterey Common"]})
required_patches.update({"Intel Ironlake": all_hardware_patchset["Graphics"]["Intel Ironlake"]})
if self.hardware_details["Graphics: Intel Sandy Bridge"] is True:
required_patches.update({"Non-Metal Common": all_hardware_patchset["Graphics"]["Non-Metal Common"]})
required_patches.update({"High Sierra GVA": all_hardware_patchset["Graphics"]["High Sierra GVA"]})
required_patches.update({"WebKit Monterey Common": all_hardware_patchset["Graphics"]["WebKit Monterey Common"]})
required_patches.update({"Intel Sandy Bridge": all_hardware_patchset["Graphics"]["Intel Sandy Bridge"]})
# Patchset breaks Display Profiles, don't install if primary GPU is AMD. Give users option to disable patch in settings to restore Display Profiles
if self.constants.computer.real_model not in ["Macmini5,2", "iMac12,1", "iMac12,2"]:
required_patches.update({"Revert Non-Metal ColorSync Workaround": all_hardware_patchset["Graphics"]["Revert Non-Metal ColorSync Workaround"]})
if self.hardware_details["Graphics: Intel Ivy Bridge"] is True:
required_patches.update({"Metal 3802 Common": all_hardware_patchset["Graphics"]["Metal 3802 Common"]})
required_patches.update({"Metal 3802 Common Extended": all_hardware_patchset["Graphics"]["Metal 3802 Common Extended"]})
required_patches.update({"Catalina GVA": all_hardware_patchset["Graphics"]["Catalina GVA"]})
required_patches.update({"Monterey OpenCL": all_hardware_patchset["Graphics"]["Monterey OpenCL"]})
required_patches.update({"Big Sur OpenCL": all_hardware_patchset["Graphics"]["Big Sur OpenCL"]})
required_patches.update({"WebKit Monterey Common": all_hardware_patchset["Graphics"]["WebKit Monterey Common"]})
required_patches.update({"Intel Ivy Bridge": all_hardware_patchset["Graphics"]["Intel Ivy Bridge"]})
if self.hardware_details["Graphics: Intel Haswell"] is True:
required_patches.update({"Metal 3802 Common": all_hardware_patchset["Graphics"]["Metal 3802 Common"]})
required_patches.update({"Metal 3802 Common Extended": all_hardware_patchset["Graphics"]["Metal 3802 Common Extended"]})
required_patches.update({"Monterey GVA": all_hardware_patchset["Graphics"]["Monterey GVA"]})
required_patches.update({"Monterey OpenCL": all_hardware_patchset["Graphics"]["Monterey OpenCL"]})
required_patches.update({"Intel Haswell": all_hardware_patchset["Graphics"]["Intel Haswell"]})
if self.hardware_details["Graphics: Intel Broadwell"] is True:
required_patches.update({"Monterey GVA": all_hardware_patchset["Graphics"]["Monterey GVA"]})
required_patches.update({"Monterey OpenCL": all_hardware_patchset["Graphics"]["Monterey OpenCL"]})
required_patches.update({"Intel Broadwell": all_hardware_patchset["Graphics"]["Intel Broadwell"]})
if self.hardware_details["Graphics: Intel Skylake"] is True:
required_patches.update({"Revert GVA Downgrade": all_hardware_patchset["Graphics"]["Revert GVA Downgrade"]})
required_patches.update({"Monterey OpenCL": all_hardware_patchset["Graphics"]["Monterey OpenCL"]})
required_patches.update({"Intel Skylake": all_hardware_patchset["Graphics"]["Intel Skylake"]})
if self.hardware_details["Graphics: Nvidia Tesla"] is True:
required_patches.update({"Non-Metal Common": all_hardware_patchset["Graphics"]["Non-Metal Common"]})
required_patches.update({"WebKit Monterey Common": all_hardware_patchset["Graphics"]["WebKit Monterey Common"]})
required_patches.update({"Nvidia Tesla": all_hardware_patchset["Graphics"]["Nvidia Tesla"]})
if self.hardware_details["Graphics: Nvidia Web Drivers"] is True:
required_patches.update({"Non-Metal Common": all_hardware_patchset["Graphics"]["Non-Metal Common"]})
required_patches.update({"Non-Metal IOAccelerator Common": all_hardware_patchset["Graphics"]["Non-Metal IOAccelerator Common"]})
required_patches.update({"Non-Metal CoreDisplay Common": all_hardware_patchset["Graphics"]["Non-Metal CoreDisplay Common"]})
required_patches.update({"WebKit Monterey Common": all_hardware_patchset["Graphics"]["WebKit Monterey Common"]})
required_patches.update({"Nvidia Web Drivers": all_hardware_patchset["Graphics"]["Nvidia Web Drivers"]})
required_patches.update({"Non-Metal Enforcement": all_hardware_patchset["Graphics"]["Non-Metal Enforcement"]})
if self.hardware_details["Graphics: Nvidia Kepler"] is True:
required_patches.update({"Metal 3802 Common": all_hardware_patchset["Graphics"]["Metal 3802 Common"]})
required_patches.update({"Metal 3802 Common Extended": all_hardware_patchset["Graphics"]["Metal 3802 Common Extended"]})
required_patches.update({"Catalina GVA": all_hardware_patchset["Graphics"]["Catalina GVA"]})
required_patches.update({"Monterey OpenCL": all_hardware_patchset["Graphics"]["Monterey OpenCL"]})
required_patches.update({"Big Sur OpenCL": all_hardware_patchset["Graphics"]["Big Sur OpenCL"]})
required_patches.update({"WebKit Monterey Common": all_hardware_patchset["Graphics"]["WebKit Monterey Common"]})
required_patches.update({"Nvidia Kepler": all_hardware_patchset["Graphics"]["Nvidia Kepler"]})
for gpu in self.constants.computer.gpus:
# Handle mixed GPU situations (ie. MacBookPro11,3: Haswell iGPU + Kepler dGPU)
if gpu.arch == device_probe.Intel.Archs.Haswell:
if "Catalina GVA" in required_patches:
del(required_patches["Catalina GVA"])
break
if self.hardware_details["Graphics: AMD TeraScale 1"] is True:
required_patches.update({"Non-Metal Common": all_hardware_patchset["Graphics"]["Non-Metal Common"]})
required_patches.update({"WebKit Monterey Common": all_hardware_patchset["Graphics"]["WebKit Monterey Common"]})
required_patches.update({"AMD TeraScale Common": all_hardware_patchset["Graphics"]["AMD TeraScale Common"]})
required_patches.update({"AMD TeraScale 1": all_hardware_patchset["Graphics"]["AMD TeraScale 1"]})
if self.hardware_details["Graphics: AMD TeraScale 2"] is True:
required_patches.update({"Non-Metal Common": all_hardware_patchset["Graphics"]["Non-Metal Common"]})
required_patches.update({"Non-Metal IOAccelerator Common": all_hardware_patchset["Graphics"]["Non-Metal IOAccelerator Common"]})
required_patches.update({"WebKit Monterey Common": all_hardware_patchset["Graphics"]["WebKit Monterey Common"]})
required_patches.update({"AMD TeraScale Common": all_hardware_patchset["Graphics"]["AMD TeraScale Common"]})
required_patches.update({"AMD TeraScale 2": all_hardware_patchset["Graphics"]["AMD TeraScale 2"]})
if self.constants.allow_ts2_accel is False or self.constants.detected_os not in self.constants.legacy_accel_support:
# TeraScale 2 MacBooks with faulty GPUs are highly prone to crashing with AMDRadeonX3000 attached
# Additionally, AMDRadeonX3000 requires IOAccelerator downgrade which is not installed without 'Non-Metal IOAccelerator Common'
del(required_patches["AMD TeraScale 2"]["Install"]["/System/Library/Extensions"]["AMDRadeonX3000.kext"])
if self.hardware_details["Graphics: AMD Legacy GCN"] is True or self.hardware_details["Graphics: AMD Legacy Polaris"] is True:
if self.hardware_details["Graphics: Intel Skylake"] is False:
# GVA downgrade not required if Skylake is present
required_patches.update({"Monterey GVA": all_hardware_patchset["Graphics"]["Monterey GVA"]})
required_patches.update({"Monterey OpenCL": all_hardware_patchset["Graphics"]["Monterey OpenCL"]})
if self.hardware_details["Graphics: AMD Legacy GCN"] is True:
required_patches.update({"AMD Legacy GCN": all_hardware_patchset["Graphics"]["AMD Legacy GCN"]})
else:
required_patches.update({"AMD Legacy Polaris": all_hardware_patchset["Graphics"]["AMD Legacy Polaris"]})
required_patches.update({"Revert GVA Downgrade": all_hardware_patchset["Graphics"]["Revert GVA Downgrade"]})
if "AVX2" not in self.constants.computer.cpu.leafs:
required_patches.update({"AMD OpenCL": all_hardware_patchset["Graphics"]["AMD OpenCL"]})
if self.hardware_details["Graphics: AMD Legacy GCN (2017)"] is True:
required_patches.update({"AMD Legacy GCN v2": all_hardware_patchset["Graphics"]["AMD Legacy GCN v2"]})
if self.hardware_details["Graphics: AMD Legacy Vega"] is True:
required_patches.update({"Monterey GVA": all_hardware_patchset["Graphics"]["Monterey GVA"]})
required_patches.update({"Monterey OpenCL": all_hardware_patchset["Graphics"]["Monterey OpenCL"]})
required_patches.update({"AMD Legacy Vega": all_hardware_patchset["Graphics"]["AMD Legacy Vega"]})
required_patches.update({"AMD OpenCL": all_hardware_patchset["Graphics"]["AMD OpenCL"]})
if self.hardware_details["Graphics: AMD Legacy GCN"] is True:
required_patches.update({"AMD Legacy Vega Extended": all_hardware_patchset["Graphics"]["AMD Legacy Vega Extended"]})
else:
required_patches.update({"Revert GVA Downgrade": all_hardware_patchset["Graphics"]["Revert GVA Downgrade"]})
if self.hardware_details["Brightness: Legacy Backlight Control"] is True:
required_patches.update({"Legacy Backlight Control": all_hardware_patchset["Brightness"]["Legacy Backlight Control"]})
if self.hardware_details["Audio: Legacy Realtek"] is True:
if self.model in ["iMac7,1", "iMac8,1"]:
required_patches.update({"Legacy Realtek": all_hardware_patchset["Audio"]["Legacy Realtek"]})
else:
required_patches.update({"Legacy Non-GOP": all_hardware_patchset["Audio"]["Legacy Non-GOP"]})
if self.hardware_details["Networking: Legacy Wireless"] is True:
required_patches.update({"Legacy Wireless": all_hardware_patchset["Networking"]["Legacy Wireless"]})
required_patches.update({"Legacy Wireless Extended": all_hardware_patchset["Networking"]["Legacy Wireless Extended"]})
if self.hardware_details["Networking: Modern Wireless"] is True:
required_patches.update({"Legacy Wireless": all_hardware_patchset["Networking"]["Modern Wireless"]})
if self.hardware_details["Miscellaneous: Legacy GMUX"] is True:
required_patches.update({"Legacy GMUX": all_hardware_patchset["Miscellaneous"]["Legacy GMUX"]})
if self.hardware_details["Miscellaneous: Legacy Keyboard Backlight"] is True:
required_patches.update({"Legacy Keyboard Backlight": all_hardware_patchset["Miscellaneous"]["Legacy Keyboard Backlight"]})
if self.hardware_details["Miscellaneous: Legacy USB 1.1"] is True:
required_patches.update({"Legacy USB 1.1": all_hardware_patchset["Miscellaneous"]["Legacy USB 1.1"]})
required_patches.update({"Legacy USB 1.1 Extended": all_hardware_patchset["Miscellaneous"]["Legacy USB 1.1 Extended"]})
if self.hardware_details["Miscellaneous: PCIe FaceTime Camera"] is True:
required_patches.update({"PCIe FaceTime Camera": all_hardware_patchset["Miscellaneous"]["PCIe FaceTime Camera"]})
if self.hardware_details["Miscellaneous: T1 Security Chip"] is True:
required_patches.update({"T1 Security Chip": all_hardware_patchset["Miscellaneous"]["T1 Security Chip"]})
if required_patches:
host_os_float = float(f"{self.constants.detected_os}.{self.constants.detected_os_minor}")
# Prioritize Monterey GVA patches
if "Catalina GVA" in required_patches and "Monterey GVA" in required_patches:
del(required_patches["Catalina GVA"])
for patch_name in list(required_patches):
patch_os_min_float = float(f'{required_patches[patch_name]["OS Support"]["Minimum OS Support"]["OS Major"]}.{required_patches[patch_name]["OS Support"]["Minimum OS Support"]["OS Minor"]}')
patch_os_max_float = float(f'{required_patches[patch_name]["OS Support"]["Maximum OS Support"]["OS Major"]}.{required_patches[patch_name]["OS Support"]["Maximum OS Support"]["OS Minor"]}')
if (host_os_float < patch_os_min_float or host_os_float > patch_os_max_float):
del(required_patches[patch_name])
else:
if required_patches[patch_name]["Display Name"]:
logging.info(f"- {required_patches[patch_name]['Display Name']}")
else:
logging.info("- No patch sets found for booted model")
return required_patches

View File

@@ -0,0 +1,287 @@
"""
sys_patch_helpers.py: Additional support functions for sys_patch.py
"""
import os
import logging
import plistlib
import subprocess
from typing import Union
from pathlib import Path
from datetime import datetime
from .. import constants
from ..datasets import os_data
from ..utilities import (
bplist,
generate_smbios,
utilities
)
class SysPatchHelpers:
"""
Library of helper functions for sys_patch.py and related libraries
"""
    def __init__(self, global_constants: constants.Constants):
        """
        Parameters:
            global_constants (constants.Constants): Global constants object,
                shared across the patcher and read by every helper below
        """
        self.constants: constants.Constants = global_constants
def snb_board_id_patch(self, source_files_path: str):
"""
Patch AppleIntelSNBGraphicsFB.kext to support unsupported Board IDs
AppleIntelSNBGraphicsFB hard codes the supported Board IDs for Sandy Bridge iGPUs
Because of this, the kext errors out on unsupported systems
This function simply patches in a supported Board ID, using 'determine_best_board_id_for_sandy()'
to supplement the ideal Board ID
Parameters:
source_files_path (str): Path to the source files
"""
source_files_path = str(source_files_path)
if self.constants.computer.reported_board_id in self.constants.sandy_board_id_stock:
return
logging.info(f"Found unsupported Board ID {self.constants.computer.reported_board_id}, performing AppleIntelSNBGraphicsFB bin patching")
board_to_patch = generate_smbios.determine_best_board_id_for_sandy(self.constants.computer.reported_board_id, self.constants.computer.gpus)
logging.info(f"Replacing {board_to_patch} with {self.constants.computer.reported_board_id}")
board_to_patch_hex = bytes.fromhex(board_to_patch.encode('utf-8').hex())
reported_board_hex = bytes.fromhex(self.constants.computer.reported_board_id.encode('utf-8').hex())
if len(board_to_patch_hex) > len(reported_board_hex):
# Pad the reported Board ID with zeros to match the length of the board to patch
reported_board_hex = reported_board_hex + bytes(len(board_to_patch_hex) - len(reported_board_hex))
elif len(board_to_patch_hex) < len(reported_board_hex):
logging.info(f"Error: Board ID {self.constants.computer.reported_board_id} is longer than {board_to_patch}")
raise Exception("Host's Board ID is longer than the kext's Board ID, cannot patch!!!")
path = source_files_path + "/10.13.6/System/Library/Extensions/AppleIntelSNBGraphicsFB.kext/Contents/MacOS/AppleIntelSNBGraphicsFB"
if not Path(path).exists():
logging.info(f"Error: Could not find {path}")
raise Exception("Failed to find AppleIntelSNBGraphicsFB.kext, cannot patch!!!")
with open(path, 'rb') as f:
data = f.read()
data = data.replace(board_to_patch_hex, reported_board_hex)
with open(path, 'wb') as f:
f.write(data)
def generate_patchset_plist(self, patchset: dict, file_name: str, kdk_used: Path):
"""
Generate patchset file for user reference
Parameters:
patchset (dict): Dictionary of patchset, see sys_patch_detect.py and sys_patch_dict.py
file_name (str): Name of the file to write to
kdk_used (Path): Path to the KDK used, if any
Returns:
bool: True if successful, False if not
"""
source_path = f"{self.constants.payload_path}"
source_path_file = f"{source_path}/{file_name}"
kdk_string = "Not applicable"
if kdk_used:
kdk_string = kdk_used
data = {
"OpenCore Legacy Patcher": f"v{self.constants.patcher_version}",
"PatcherSupportPkg": f"v{self.constants.patcher_support_pkg_version}",
"Time Patched": f"{datetime.now().strftime('%B %d, %Y @ %H:%M:%S')}",
"Commit URL": f"{self.constants.commit_info[2]}",
"Kernel Debug Kit Used": f"{kdk_string}",
"OS Version": f"{self.constants.detected_os}.{self.constants.detected_os_minor} ({self.constants.detected_os_build})",
"Custom Signature": bool(Path(self.constants.payload_local_binaries_root_path / ".signed").exists()),
}
data.update(patchset)
if Path(source_path_file).exists():
os.remove(source_path_file)
# Need to write to a safe location
plistlib.dump(data, Path(source_path_file).open("wb"), sort_keys=False)
if Path(source_path_file).exists():
return True
return False
def disable_window_server_caching(self):
"""
Disable WindowServer's asset caching
On legacy GCN GPUs, the WindowServer cache generated creates
corrupted Opaque shaders.
To work-around this, we disable WindowServer caching
And force macOS into properly generating the Opaque shaders
"""
if self.constants.detected_os < os_data.os_data.ventura:
return
logging.info("Disabling WindowServer Caching")
# Invoke via 'bash -c' to resolve pathing
utilities.elevated(["bash", "-c", "rm -rf /private/var/folders/*/*/*/WindowServer/com.apple.WindowServer"])
# Disable writing to WindowServer folder
utilities.elevated(["bash", "-c", "chflags uchg /private/var/folders/*/*/*/WindowServer"])
# Reference:
# To reverse write lock:
# 'chflags nouchg /private/var/folders/*/*/*/WindowServer'
    def remove_news_widgets(self):
        """
        Remove News Widgets from Notification Centre

        On Ivy Bridge and Haswell iGPUs, RenderBox will crash the News Widgets in
        Notification Centre. To ensure users can access Notifications normally,
        we manually remove all News Widgets
        """
        # Only applicable on Ventura and newer
        if self.constants.detected_os < os_data.os_data.ventura:
            return

        logging.info("Parsing Notification Centre Widgets")

        file_path = "~/Library/Containers/com.apple.notificationcenterui/Data/Library/Preferences/com.apple.notificationcenterui.plist"
        file_path = Path(file_path).expanduser()
        if not file_path.exists():
            logging.info("- Defaults file not found, skipping")
            return

        did_find = False
        with open(file_path, "rb") as f:
            data = plistlib.load(f)
            if "widgets" not in data:
                return
            if "instances" not in data["widgets"]:
                return
            # Iterate a copy: entries are removed from the live list below
            for widget in list(data["widgets"]["instances"]):
                # Each instance is itself a nested binary plist blob
                widget_data = bplist.BPListReader(widget).parse()
                for entry in widget_data:
                    if 'widget' not in entry:
                        continue
                    sub_data = bplist.BPListReader(widget_data[entry]).parse()
                    for sub_entry in sub_data:
                        if not '$object' in sub_entry:
                            continue
                        # assumes sub_data[sub_entry][2] holds the widget's bundle ID
                        # as bytes — structure inferred from NSKeyedArchiver layout, verify
                        if not b'com.apple.news' in sub_data[sub_entry][2]:
                            continue
                        logging.info(f"- Found News Widget to remove: {sub_data[sub_entry][2].decode('ascii')}")
                        data["widgets"]["instances"].remove(widget)
                        did_find = True
        if did_find:
            # Persist the pruned widget list, then restart Notification Centre
            # so the change takes effect immediately
            with open(file_path, "wb") as f:
                plistlib.dump(data, f, sort_keys=False)
            subprocess.run(["/usr/bin/killall", "NotificationCenter"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def install_rsr_repair_binary(self):
"""
Installs RSRRepair
RSRRepair is a utility that will sync the SysKC and BootKC in the event of a panic
With macOS 13.2, Apple implemented the Rapid Security Response System
However Apple added a half baked snapshot reversion system if seal was broken,
which forgets to handle Preboot BootKC syncing.
Thus this application will try to re-sync the BootKC with SysKC in the event of a panic
Reference: https://github.com/dortania/OpenCore-Legacy-Patcher/issues/1019
This is a (hopefully) temporary work-around, however likely to stay.
RSRRepair has the added bonus of fixing desynced KCs from 'bless', so useful in Big Sur+
Source: https://github.com/flagersgit/RSRRepair
"""
if self.constants.detected_os < os_data.os_data.big_sur:
return
logging.info("Installing Kernel Collection syncing utility")
result = utilities.elevated([self.constants.rsrrepair_userspace_path, "--install"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if result.returncode != 0:
logging.info(f"- Failed to install RSRRepair: {result.stdout.decode()}")
def patch_gpu_compiler_libraries(self, mount_point: Union[str, Path]):
"""
Fix GPUCompiler.framework's libraries to resolve linking issues
On 13.3 with 3802 GPUs, OCLP will downgrade GPUCompiler to resolve
graphics support. However the binary hardcodes the library names,
and thus we need to adjust the libraries to match (31001.669)
Important portions of the library will be downgraded to 31001.669,
and the remaining bins will be copied over (via CoW to reduce waste)
Primary folders to merge:
- 31001.XXX: (current OS version)
- include:
- module.modulemap
- opencl-c.h
- lib (entire directory)
Note: With macOS Sonoma, 32023 compiler is used instead and so this patch is not needed
until macOS 14.2 Beta 2 with version '32023.26'.
Parameters:
mount_point: The mount point of the target volume
"""
if os_data.os_data.sonoma < self.constants.detected_os < os_data.os_data.ventura:
return
if self.constants.detected_os == os_data.os_data.ventura:
if self.constants.detected_os_minor < 4: # 13.3
return
BASE_VERSION = "31001"
GPU_VERSION = f"{BASE_VERSION}.669"
elif self.constants.detected_os == os_data.os_data.sonoma:
if self.constants.detected_os_minor < 2: # 14.2 Beta 2
return
BASE_VERSION = "32023"
GPU_VERSION = f"{BASE_VERSION}.26"
LIBRARY_DIR = f"{mount_point}/System/Library/PrivateFrameworks/GPUCompiler.framework/Versions/{BASE_VERSION}/Libraries/lib/clang"
DEST_DIR = f"{LIBRARY_DIR}/{GPU_VERSION}"
if not Path(DEST_DIR).exists():
raise Exception(f"Failed to find GPUCompiler libraries at {DEST_DIR}")
for file in Path(LIBRARY_DIR).iterdir():
if file.is_file():
continue
if file.name == GPU_VERSION:
continue
# Partial match as each OS can increment the version
if not file.name.startswith(f"{BASE_VERSION}."):
continue
logging.info(f"Merging GPUCompiler.framework libraries to match binary")
src_dir = f"{LIBRARY_DIR}/{file.name}"
if not Path(f"{DEST_DIR}/lib").exists():
utilities.process_status(utilities.elevated(["/bin/cp", "-cR", f"{src_dir}/lib", f"{DEST_DIR}/"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
break