USE when optimizing AI applications for mobile devices. Implements battery-aware processing, thermal management, adaptive quality settings, and power-efficient inference patterns.
Installation:
/plugin marketplace add Git-Fg/thecattoolkit
/plugin install git-fg-sys-edge-plugins-sys-edge@Git-Fg/thecattoolkit
Ensures AI applications run efficiently on mobile devices through intelligent battery management, thermal optimization, and adaptive quality controls.
Battery State Monitoring:
```python
import psutil
from typing import Any, Dict


class BatteryMonitor:
    def __init__(self):
        self.battery = psutil.sensors_battery()  # None on machines without a battery
        self.power_plan = self._determine_power_plan()
        self.monitoring_interval = 60  # seconds

    def _determine_power_plan(self) -> str:
        """Determine the optimal power mode based on battery state."""
        if not self.battery or self.battery.power_plugged:
            return "AC_POWER"  # no battery, or running on AC power
        percent = self.battery.percent
        if percent > 75:
            return "HIGH_PERFORMANCE"
        elif percent > 50:
            return "BALANCED"
        elif percent > 20:
            return "POWER_SAVER"
        else:
            return "CRITICAL_POWER_SAVER"

    def get_processing_config(self) -> Dict[str, Any]:
        """Get the processing configuration for the current power plan."""
        plans = {
            "HIGH_PERFORMANCE": {
                "batch_size": 32,
                "quality": "high",
                "parallel_workers": 4,
                "cache_size_mb": 512
            },
            "BALANCED": {
                "batch_size": 16,
                "quality": "medium",
                "parallel_workers": 2,
                "cache_size_mb": 256
            },
            "POWER_SAVER": {
                "batch_size": 8,
                "quality": "low",
                "parallel_workers": 1,
                "cache_size_mb": 128
            },
            "CRITICAL_POWER_SAVER": {
                "batch_size": 4,
                "quality": "minimal",
                "parallel_workers": 1,
                "cache_size_mb": 64,
                "defer_non_critical": True
            }
        }
        # AC_POWER has no battery constraint, so it shares the high-performance profile
        return plans.get(self.power_plan, plans["HIGH_PERFORMANCE"])
```
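A minimal polling sketch, assuming only psutil and the class above: rebuilding the monitor each interval re-reads the battery, so every pass picks up the current power plan.
```python
import time

# Hypothetical monitoring loop for illustration only: a fresh BatteryMonitor
# re-samples the battery, so each pass reflects the latest power plan.
while True:
    monitor = BatteryMonitor()
    config = monitor.get_processing_config()
    print(f"plan={monitor.power_plan}, batch_size={config['batch_size']}")
    time.sleep(monitor.monitoring_interval)
```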
Dynamic Quality Adjustment:
```python
from typing import Dict


class AdaptiveQualityController:
    def __init__(self):
        self.current_quality = "medium"
        self.performance_history = []
        # Minimum acceptable metrics per quality level; the "minimal" entry
        # is included so _reduce_quality can never select an unknown level.
        self.quality_thresholds = {
            "high": {"fps": 30, "latency_ms": 100, "cpu_percent": 70},
            "medium": {"fps": 20, "latency_ms": 200, "cpu_percent": 50},
            "low": {"fps": 10, "latency_ms": 500, "cpu_percent": 30},
            "minimal": {"fps": 5, "latency_ms": 1000, "cpu_percent": 20}
        }

    def adjust_quality(self, metrics: Dict[str, float]) -> str:
        """Dynamically adjust quality based on performance metrics."""
        threshold = self.quality_thresholds[self.current_quality]
        if not self._is_quality_sustainable(metrics, threshold):
            # Device cannot keep up at the current level: step down
            self.current_quality = self._reduce_quality()
        elif (metrics["cpu_percent"] < threshold["cpu_percent"] * 0.7 and
              metrics["fps"] > threshold["fps"] * 1.2):
            # Ample headroom: try the next level up
            self.current_quality = self._increase_quality()
        return self.current_quality

    def _is_quality_sustainable(self, metrics: Dict[str, float],
                                threshold: Dict[str, float]) -> bool:
        """The current level is sustainable when every metric meets its threshold."""
        return (metrics["cpu_percent"] <= threshold["cpu_percent"] and
                metrics["fps"] >= threshold["fps"] and
                metrics["latency_ms"] <= threshold["latency_ms"])

    def _reduce_quality(self) -> str:
        """Step down one quality level to save battery."""
        quality_levels = ["high", "medium", "low", "minimal"]
        current_index = quality_levels.index(self.current_quality)
        return quality_levels[min(current_index + 1, len(quality_levels) - 1)]

    def _increase_quality(self) -> str:
        """Step up one quality level if resources allow."""
        quality_levels = ["high", "medium", "low", "minimal"]
        current_index = quality_levels.index(self.current_quality)
        return quality_levels[max(current_index - 1, 0)]
```
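A short driving example; the metrics dict is illustrative and could come from any profiler or render loop.
```python
controller = AdaptiveQualityController()

# Illustrative reading: 14 fps is below the 20 fps floor for "medium",
# so the controller steps down one level.
metrics = {"fps": 14.0, "latency_ms": 320.0, "cpu_percent": 62.0}
print(controller.adjust_quality(metrics))  # -> "low"

# A healthy reading with ample headroom steps back up.
metrics = {"fps": 28.0, "latency_ms": 90.0, "cpu_percent": 18.0}
print(controller.adjust_quality(metrics))  # -> "medium"
```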
Temperature-Based Throttling:
```python
import psutil
from typing import Any, Dict


class ThermalManager:
    def __init__(self):
        self.temperature_threshold = 75  # Celsius
        self.critical_threshold = 85
        self.throttling_active = False
        self.cooldown_period = 300  # seconds to wait before ramping back up

    def monitor_temperature(self) -> Dict[str, Any]:
        """Monitor device temperature and apply throttling if needed."""
        temp = self._get_cpu_temperature()
        if temp > self.critical_threshold:
            return self._emergency_shutdown()
        elif temp > self.temperature_threshold:
            return self._activate_throttling(temp)
        elif self.throttling_active and temp < (self.temperature_threshold - 10):
            return self._deactivate_throttling(temp)
        else:
            return {"status": "normal", "temperature": temp}

    def _get_cpu_temperature(self) -> float:
        """Read the hottest CPU sensor (psutil support is platform-dependent)."""
        if not hasattr(psutil, "sensors_temperatures"):
            return 0.0  # platform without sensor support
        readings = [t.current
                    for temps in psutil.sensors_temperatures().values()
                    for t in temps]
        return max(readings) if readings else 0.0

    def _activate_throttling(self, temp: float) -> Dict[str, Any]:
        """Reduce processing load to cool the device."""
        self.throttling_active = True
        return {
            "status": "throttling",
            "temperature": temp,
            "actions": [
                "reduce_batch_size_by_50%",
                "limit_parallel_workers_to_1",
                "disable_non_essential_features",
                "increase_processing_intervals"
            ]
        }

    def _deactivate_throttling(self, temp: float) -> Dict[str, Any]:
        """Restore normal processing once the device has cooled."""
        self.throttling_active = False
        return {"status": "recovered", "temperature": temp}

    def _emergency_shutdown(self) -> Dict[str, Any]:
        """Critical temperature: shut down non-essential processing."""
        return {
            "status": "emergency_shutdown",
            "actions": [
                "stop_all_non_critical_processing",
                "save_state_and_pause",
                "notify_user_of_thermal_event"
            ]
        }
```
Batch Processing and Wake Cycle Optimization:
```python
import time
from typing import Dict, List


class PowerEfficientInference:
    def __init__(self):
        self.pending_requests = []
        self.batch_timeout = 2.0  # seconds
        self.wake_lock = None  # platform wake-lock handle

    def schedule_inference(self, request: Dict) -> None:
        """Schedule inference with power-efficient batching."""
        self.pending_requests.append({
            "data": request,
            "timestamp": time.time(),
            "priority": request.get("priority", "normal")
        })
        # Determine whether the batch should be processed now
        if self._should_process_batch():
            self._process_pending_batch()

    def _should_process_batch(self) -> bool:
        """Process when the batch is full, the oldest request has timed out,
        or a high-priority request is waiting."""
        if len(self.pending_requests) >= 10:
            return True
        if time.time() - self.pending_requests[0]["timestamp"] > self.batch_timeout:
            return True
        return any(req["priority"] == "high" for req in self.pending_requests)

    def _process_pending_batch(self) -> List[Dict]:
        """Process all pending requests in a single wake cycle."""
        if not self.pending_requests:
            return []
        self._acquire_wake_lock()  # prevent device sleep during inference
        try:
            # Combine all requests, run one inference, and fan the results
            # back out to the individual callers
            batch_input = self._combine_batch_inputs(self.pending_requests)
            batch_results = self._run_batch_inference(batch_input)
            return self._distribute_results(batch_results)
        finally:
            self._release_wake_lock()
            self.pending_requests.clear()

    # The helpers below are platform- and model-specific hooks; a concrete
    # deployment supplies real implementations (e.g., Android PowerManager
    # wake locks and the model runtime's batch API).
    def _acquire_wake_lock(self) -> None: ...
    def _release_wake_lock(self) -> None: ...
    def _combine_batch_inputs(self, requests: List[Dict]): ...
    def _run_batch_inference(self, batch_input): ...
    def _distribute_results(self, batch_results) -> List[Dict]: ...
```
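One caveat with the scheduler above: `_should_process_batch` only runs inside `schedule_inference`, so a lone request can sit past `batch_timeout` until the next request arrives. A hedged sketch of a background flusher (an assumed addition, not part of the original class):
```python
import threading
import time

def start_flusher(engine: PowerEfficientInference) -> threading.Thread:
    """Periodically drain stragglers so batch_timeout is honored."""
    def flush() -> None:
        while True:
            time.sleep(engine.batch_timeout)
            # A production version would guard pending_requests with a lock.
            if engine.pending_requests and engine._should_process_batch():
                engine._process_pending_batch()
    thread = threading.Thread(target=flush, daemon=True)
    thread.start()
    return thread
```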
Deferred Processing for Non-Critical Tasks:
```python
import psutil
from typing import Dict


class BackgroundTaskManager:
    def __init__(self):
        self.critical_tasks = []
        self.background_tasks = []
        self.charging_only_tasks = []

    def defer_non_critical(self, task: Dict) -> None:
        """Run a non-critical task now on AC power; otherwise defer it."""
        battery = psutil.sensors_battery()
        if not battery or battery.power_plugged:
            # Execute immediately on AC power
            self._execute_task(task)
        elif task.get("requires_charging", False):
            self.charging_only_tasks.append(task)
        else:
            self.background_tasks.append(task)

    def process_background_tasks(self) -> None:
        """Process one deferred task during idle time or with ample battery."""
        if not self.background_tasks:
            return
        battery = psutil.sensors_battery()
        is_idle = self._check_device_idleness()
        if (battery and battery.percent > 50) or is_idle:
            task = self.background_tasks.pop(0)
            self._execute_task(task)

    # Application-specific hooks supplied by the host application:
    def _execute_task(self, task: Dict) -> None: ...
    def _check_device_idleness(self) -> bool: ...
```
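A brief sketch of deferring work; the task dict keys (`name`, `requires_charging`) are assumed for illustration, and the hooks above are stubs.
```python
manager = BackgroundTaskManager()

# Not urgent: runs now on AC power, otherwise queued for idle time.
manager.defer_non_critical({"name": "reindex_embeddings"})

# Heavy job that should wait for the charger.
manager.defer_non_critical({"name": "refresh_model", "requires_charging": True})

# Called from an idle handler; drains one deferred task per invocation.
manager.process_background_tasks()
```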
Usage Examples:
These snippets assume the plugin runtime provides MobileOptimizer and EdgeAIManager (the latter from the edge-ai-management skill).
```python
# Initialize the mobile optimizer
optimizer = MobileOptimizer()

# Run inference with battery awareness
result = optimizer.run_inference(
    input_data=data,
    quality_adaptive=True,  # automatically adjust quality
    battery_aware=True      # conserve battery when low
)
# Automatically adjusts:
# - processing quality based on battery level
# - batch size for efficiency
# - parallel workers to manage CPU usage
```
```python
# Monitor and adapt to thermal state
thermal_manager = ThermalManager()
while processing:
    thermal_status = thermal_manager.monitor_temperature()
    if thermal_status["status"] == "throttling":
        reduce_processing_intensity()  # automatically reduce processing load
    elif thermal_status["status"] == "emergency_shutdown":
        save_state_and_pause()  # save state and pause
        break
```
```python
# Queue requests for batch processing
power_efficient = PowerEfficientInference()

# Multiple requests are batched automatically and processed together in a
# single wake cycle, which can sharply reduce the number of device wake-ups
power_efficient.schedule_inference(request1)
power_efficient.schedule_inference(request2)
```
```python
# Combined edge optimization
edge_manager = EdgeAIManager()
mobile_optimizer = MobileOptimizer()

# Apply both optimizations
config = mobile_optimizer.get_processing_config()
model = edge_manager.load_model(
    model_name="generator",
    constraints={
        **config,
        "battery_percent": get_battery_percent(),
        "thermal_state": get_thermal_state()
    }
)
```
Bundled scripts:
- battery_monitor.py: Battery state tracking and power plan selection
- thermal_manager.py: Temperature monitoring and throttling logic
- adaptive_quality.py: Dynamic quality adjustment based on performance
- power_efficient_scheduler.py: Batch processing and wake cycle optimization

See also the edge-ai-management skill for complete mobile optimization.