Skip to content

Commit

Permalink
fix(DBus): use async dbus interface for GPU control
Browse files Browse the repository at this point in the history
* Use multi-threading and specify the number of workers

* Make TDP interface async

* Make GPU dbus interface async too
  • Loading branch information
NeroReflex authored Apr 4, 2024
1 parent f81dfb8 commit da6f6c3
Show file tree
Hide file tree
Showing 12 changed files with 632 additions and 333 deletions.
26 changes: 15 additions & 11 deletions src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use crate::dbus::gpu::get_gpus;
mod constants;
mod performance;

#[tokio::main]
#[tokio::main(flavor = "multi_thread", worker_threads = 2)]
async fn main() -> Result<(), Box<dyn Error>> {
SimpleLogger::new().init().unwrap();
log::info!("Starting PowerStation");
Expand All @@ -33,31 +33,35 @@ async fn main() -> Result<(), Box<dyn Error>> {

// Discover all GPUs and Generate GPU objects to serve
let mut gpu_obj_paths: Vec<String> = Vec::new();
for mut card in get_gpus() {
for mut card in get_gpus().await {
// Build the DBus object path for this card
let card_name = card.name().as_str().title();
let gpu_path = card.gpu_path();
let gpu_name = card.name().await;
let card_name = gpu_name.as_str().title();
let gpu_path = card.gpu_path().await;
gpu_obj_paths.push(gpu_path.clone());

// Get the TDP interface from the card and serve it on DBus
let tdp = card.get_tdp_interface();
if tdp.is_some() {
log::debug!("Discovered TDP interface on card: {}", card_name);
let tdp = tdp.unwrap();
connection.object_server().at(gpu_path.clone(), tdp).await?;
match card.get_tdp_interface().await {
Some(tdp) => {
log::debug!("Discovered TDP interface on card: {}", card_name);
connection.object_server().at(gpu_path.clone(), tdp).await?;
},
None => {
log::warn!("Card {} does not have a TDP interface", card_name);
}
}

// Get GPU connectors from the card and serve them on DBus
let mut connector_paths: Vec<String> = Vec::new();
let connectors = get_connectors(card.name());
let connectors = get_connectors(gpu_name);
for connector in connectors {
let name = connector.name.clone().replace('-', "/");
let port_path = format!("{0}/{1}", gpu_path, name);
connector_paths.push(port_path.clone());
log::debug!("Discovered connector on {}: {}", card_name, port_path);
connection.object_server().at(port_path, connector).await?;
}
card.set_connector_paths(connector_paths);
card.set_connector_paths(connector_paths).await;

// Serve the GPU interface on DBus
connection
Expand Down
85 changes: 44 additions & 41 deletions src/performance/gpu/amd/amdgpu.rs
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
use std::{
fs::{self, OpenOptions},
io::Write,
sync::{
Arc, Mutex
}
sync::Arc
};

use tokio::sync::Mutex;

use crate::constants::GPU_PATH;
use crate::performance::gpu::interface::GPUIface;
use crate::performance::gpu::{amd, tdp::TDPDevice};
use crate::performance::gpu::interface::GPUDevice;
use crate::performance::gpu::amd;
use crate::performance::gpu::dbus::devices::TDPDevices;
use crate::performance::gpu::interface::{GPUError, GPUResult};

#[derive(Debug, Clone)]
Expand All @@ -29,22 +30,24 @@ pub struct AMDGPU {
}


impl GPUIface for AMDGPU {
impl GPUDevice for AMDGPU {

fn get_gpu_path(&self) -> String {
format!("{0}/{1}", GPU_PATH, self.name())
async fn get_gpu_path(&self) -> String {
format!("{0}/{1}", GPU_PATH, self.name().await)
}

/// Returns the TDP DBus interface for this GPU
fn get_tdp_interface(&self) -> Option<Arc<Mutex<dyn TDPDevice>>> {
// TODO: if asusd is present, or asus-wmi is present this is where it is bound to the GPU
async fn get_tdp_interface(&self) -> Option<Arc<Mutex<TDPDevices>>> {
// if asusd is present, or asus-wmi is present this is where it is bound to the GPU
match self.class.as_str() {
"integrated" => Some(
Arc::new(
Mutex::new(
amd::tdp::TDP::new(
self.path.clone(),
self.device_id.clone()
TDPDevices::AMD(
amd::tdp::TDP::new(
self.path.clone(),
self.device_id.clone()
)
)
)
)
Expand All @@ -53,85 +56,85 @@ impl GPUIface for AMDGPU {
}
}

fn name(&self) -> String {
async fn name(&self) -> String {
self.name.clone()
}

fn path(&self) -> String {
async fn path(&self) -> String {
self.path.clone()
}

fn class(&self) -> String {
async fn class(&self) -> String {
self.class.clone()
}

fn class_id(&self) -> String {
async fn class_id(&self) -> String {
self.class_id.clone()
}

fn vendor(&self) -> String {
async fn vendor(&self) -> String {
self.vendor.clone()
}

fn vendor_id(&self) -> String {
async fn vendor_id(&self) -> String {
self.vendor_id.clone()
}

fn device(&self) -> String {
async fn device(&self) -> String {
self.device.clone()
}

fn device_id(&self) -> String {
async fn device_id(&self) -> String {
self.device_id.clone()
}

fn subdevice(&self) -> String {
async fn subdevice(&self) -> String {
self.subdevice.clone()
}

fn subdevice_id(&self) -> String {
async fn subdevice_id(&self) -> String {
self.subdevice_id.clone()
}

fn subvendor_id(&self) -> String {
async fn subvendor_id(&self) -> String {
self.subvendor_id.clone()
}

fn revision_id(&self) -> String {
async fn revision_id(&self) -> String {
self.revision_id.clone()
}

fn clock_limit_mhz_min(&self) -> GPUResult<f64> {
let limits = get_clock_limits(self.path())
async fn clock_limit_mhz_min(&self) -> GPUResult<f64> {
let limits = get_clock_limits(self.path().await)
.map_err(|err| GPUError::IOError(err.to_string()))?;

let (min, _) = limits;
Ok(min)
}

fn clock_limit_mhz_max(&self) -> GPUResult<f64> {
let limits = get_clock_limits(self.path())
async fn clock_limit_mhz_max(&self) -> GPUResult<f64> {
let limits = get_clock_limits(self.path().await)
.map_err(|err| GPUError::IOError(err.to_string()))?;

let (_, max) = limits;
Ok(max)
}

fn clock_value_mhz_min(&self) -> GPUResult<f64> {
let values = get_clock_values(self.path())
async fn clock_value_mhz_min(&self) -> GPUResult<f64> {
let values = get_clock_values(self.path().await)
.map_err(|err| GPUError::IOError(err.to_string()))?;

let (min, _) = values;
Ok(min)
}

fn set_clock_value_mhz_min(&mut self, value: f64) -> GPUResult<()> {
async fn set_clock_value_mhz_min(&mut self, value: f64) -> GPUResult<()> {
// Build the clock command to send
// https://www.kernel.org/doc/html/v5.9/gpu/amdgpu.html#pp-od-clk-voltage
let command = format!("s 0 {}\n", value);

// Open the sysfs file to write to
let path = format!("{0}/{1}", self.path(), "device/pp_od_clk_voltage");
let path = format!("{0}/{1}", self.path().await, "device/pp_od_clk_voltage");
let file = OpenOptions::new().write(true).open(path.clone());

// Write the value
Expand All @@ -156,21 +159,21 @@ impl GPUIface for AMDGPU {
)
}

fn clock_value_mhz_max(&self) -> GPUResult<f64> {
let values = get_clock_values(self.path())
async fn clock_value_mhz_max(&self) -> GPUResult<f64> {
let values = get_clock_values(self.path().await)
.map_err(|err| GPUError::IOError(err.to_string()))?;

let (_, max) = values;
Ok(max)
}

fn set_clock_value_mhz_max(&mut self, value: f64) -> GPUResult<()> {
async fn set_clock_value_mhz_max(&mut self, value: f64) -> GPUResult<()> {
// Build the clock command to send
// https://www.kernel.org/doc/html/v5.9/gpu/amdgpu.html#pp-od-clk-voltage
let command = format!("s 1 {}\n", value);

// Open the sysfs file to write to
let path = format!("{0}/{1}", self.path(), "device/pp_od_clk_voltage");
let path = format!("{0}/{1}", self.path().await, "device/pp_od_clk_voltage");
let file = OpenOptions::new().write(true).open(path.clone());

// Write the value
Expand All @@ -188,10 +191,10 @@ impl GPUIface for AMDGPU {
)
}

fn manual_clock(&self) -> GPUResult<bool> {
async fn manual_clock(&self) -> GPUResult<bool> {
let path = format!(
"{0}/{1}",
self.path(),
self.path().await,
"device/power_dpm_force_performance_level"
);

Expand All @@ -204,13 +207,13 @@ impl GPUIface for AMDGPU {
Ok(status == "manual")
}

fn set_manual_clock(&mut self, enabled: bool) -> GPUResult<()> {
async fn set_manual_clock(&mut self, enabled: bool) -> GPUResult<()> {
let status = if enabled { "manual" } else { "auto" };

// Open the sysfs file to write to
let path = format!(
"{0}/{1}",
self.path(),
self.path().await,
"device/power_dpm_force_performance_level"
);

Expand Down
95 changes: 95 additions & 0 deletions src/performance/gpu/amd/asus.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
use std::sync::Arc;
use udev::{Enumerator, Device};

use crate::performance::gpu::tdp::{TDPDevice, TDPResult, TDPError};
use crate::performance::gpu::dbus::devices::TDPDevices;

use zbus::{Connection, Result};

use rog_dbus::RogDbusClientBlocking;
use rog_dbus::DbusProxies;
use rog_platform::{platform::RogPlatform, error::PlatformError};
use rog_platform::platform::{GpuMode, Properties, ThrottlePolicy};
use rog_profiles::error::ProfileError;

use std::sync::Mutex;

/// Implementation of asusd with a fallback to asus-wmi sysfs
/// See https://www.kernel.org/doc/html/v6.8-rc4/admin-guide/abi-testing.html#abi-sys-devices-platform-platform-ppt-apu-sppt
pub struct ASUS {
    // Handle to the asus-wmi platform interface discovered in `ASUS::new`.
    // NOTE(review): this is a std::sync::Mutex wrapped in Arc while the
    // TDPDevice methods are async — confirm the guard is never held
    // across an .await (tokio::sync::Mutex would be needed for that).
    platform: Arc<Mutex<RogPlatform>>,
}

impl ASUS {

    /// Probe for an ASUS system: succeeds only when the asus-wmi kernel
    /// module is loaded (detected via `RogPlatform::new`). Returns `None`
    /// on non-ASUS systems, logging the probe outcome either way.
    pub async fn new() -> Option<Self> {
        let platform = RogPlatform::new()
            .map_err(|err| log::info!("Module asus-wmi not found: {}", err))
            .ok()?;
        log::info!("Module asus-wmi WAS found");
        Some(Self {
            platform: Arc::new(Mutex::new(platform)),
        })
    }

}

impl TDPDevice for ASUS {
    /// Read the current TDP via the asusd DBus daemon's `ppt_apu_sppt`
    /// property, returning it in watts.
    ///
    /// Fails with `TDPError::FailedOperation` (carrying the underlying
    /// error text) when asusd is unreachable or the property read fails.
    async fn tdp(&self) -> TDPResult<f64> {
        match RogDbusClientBlocking::new() {
            Ok((dbus, _)) => {
                // Query the sustained APU power target from asusd.
                match dbus.proxies().platform().ppt_apu_sppt() {
                    Ok(result) => {
                        log::info!("Initial ppt_apu_sppt: {}", result);
                        Ok(result as f64)
                    },
                    Err(err) => {
                        log::warn!("Error fetching ppt_apu_sppt: {}", err);
                        // Propagate the real failure reason instead of an
                        // empty message so callers can diagnose it.
                        Err(TDPError::FailedOperation(format!(
                            "Failed to read ppt_apu_sppt: {}", err
                        )))
                    }
                }
            },
            Err(err) => {
                log::warn!(
                    "Unable to use asusd to read tdp ({}), asus-wmi interface will be used",
                    err
                );
                Err(TDPError::FailedOperation(format!(
                    "Failed to connect to asusd: {}", err
                )))
            }
        }
    }

    /// Set the TDP in watts. Not yet implemented.
    async fn set_tdp(&mut self, value: f64) -> TDPResult<()> {
        todo!()
    }

    /// Read the boost (fast) power limit in watts. Not yet implemented.
    async fn boost(&self) -> TDPResult<f64> {
        todo!()
    }

    /// Set the boost (fast) power limit in watts. Not yet implemented.
    async fn set_boost(&mut self, value: f64) -> TDPResult<()> {
        todo!()
    }

    /// Read the thermal throttle limit in degrees Celsius. Not yet implemented.
    async fn thermal_throttle_limit_c(&self) -> TDPResult<f64> {
        todo!()
    }

    /// Set the thermal throttle limit in degrees Celsius. Not yet implemented.
    async fn set_thermal_throttle_limit_c(&mut self, limit: f64) -> TDPResult<()> {
        todo!()
    }

    /// Read the current power profile name. Not yet implemented.
    async fn power_profile(&self) -> TDPResult<String> {
        todo!()
    }

    /// Set the power profile by name. Not yet implemented.
    async fn set_power_profile(&mut self, profile: String) -> TDPResult<()> {
        todo!()
    }
}
Loading

0 comments on commit da6f6c3

Please sign in to comment.