refactored vault

This commit is contained in:
2025-09-24 11:32:11 +02:00
parent d5670ca470
commit 1a40f9d2aa
16 changed files with 2364 additions and 1481 deletions

src-tauri/Cargo.lock generated
View File

@ -3646,9 +3646,9 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.223"
version = "1.0.226"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a505d71960adde88e293da5cb5eda57093379f64e61cf77bf0e6a63af07a7bac"
checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd"
dependencies = [
"serde_core",
"serde_derive",
@ -3667,18 +3667,18 @@ dependencies = [
[[package]]
name = "serde_core"
version = "1.0.223"
version = "1.0.226"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20f57cbd357666aa7b3ac84a90b4ea328f1d4ddb6772b430caa5d9e1309bb9e9"
checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.223"
version = "1.0.226"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d428d07faf17e306e699ec1e91996e5a165ba5d6bce5b5155173e91a8a01a56"
checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33"
dependencies = [
"proc-macro2",
"quote",

View File

@ -18,7 +18,7 @@ crate-type = ["staticlib", "cdylib", "rlib"]
serde_json = "1.0.145"
tauri-build = { version = "2.2", features = [] }
serde = { version = "1.0.223", features = ["derive"] }
serde = { version = "1.0.226", features = ["derive"] }
[dependencies]
rusqlite = { version = "0.37.0", features = [
"load_extension",

View File

@ -0,0 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type TriggerSetupResult = "Success" | "TableNotFound";

View File

@ -1,6 +1,7 @@
// src/hlc_service.rs
// src-tauri/src/crdt/hlc.rs
use crate::table_names::TABLE_CRDT_CONFIGS;
use rusqlite::{params, Connection, Result as RusqliteResult, Transaction};
use rusqlite::{params, Connection, Transaction};
use std::{
fmt::Debug,
str::FromStr,
@ -14,8 +15,6 @@ use uuid::Uuid;
const HLC_NODE_ID_TYPE: &str = "hlc_node_id";
const HLC_TIMESTAMP_TYPE: &str = "hlc_timestamp";
//pub const TABLE_CRDT_CONFIGS: &str = "haex_crdt_settings";
#[derive(Error, Debug)]
pub enum HlcError {
#[error("Database error: {0}")]
@ -28,104 +27,215 @@ pub enum HlcError {
MutexPoisoned,
#[error("Failed to create node ID: {0}")]
CreateNodeId(#[from] uhlc::SizeError),
#[error("No database connection available")]
NoConnection,
#[error("HLC service not initialized")]
NotInitialized,
#[error("Hex decode error: {0}")]
HexDecode(String),
#[error("UTF-8 conversion error: {0}")]
Utf8Error(String),
}
/// A thread-safe, persistent HLC service.
#[derive(Clone)]
pub struct HlcService(Arc<Mutex<HLC>>);
pub struct HlcService {
hlc: Arc<Mutex<Option<HLC>>>,
}
impl HlcService {
/// Creates a new HLC service, initializing it from the database or creating a new
/// persistent identity if one does not exist.
pub fn new(conn: &mut Connection) -> Result<Self, HlcError> {
// 1. Manage persistent node identity.
let node_id = Self::get_or_create_node_id(conn)?;
// 2. Create HLC instance with stable identity using the HLCBuilder.
let hlc = HLCBuilder::new()
.with_id(node_id)
.with_max_delta(Duration::from_secs(1)) // Example of custom configuration
.build();
// 3. Load the last persisted timestamp and update the clock.
let last_state_str: RusqliteResult<String> = conn.query_row(
&format!("SELECT value FROM {} WHERE key = ?1", TABLE_CRDT_CONFIGS),
params![HLC_TIMESTAMP_TYPE],
|row| row.get(0),
);
if let Ok(state_str) = last_state_str {
let timestamp =
Timestamp::from_str(&state_str).map_err(|e| HlcError::ParseTimestamp(e.cause))?;
// Update the clock with the persisted state.
// we might want to handle the error case where the clock drifts too far.
hlc.update_with_timestamp(&timestamp)
.map_err(|e| HlcError::Parse(e.to_string()))?;
/// Creates a new HLC service. The HLC will be initialized on first database access.
pub fn new() -> Self {
HlcService {
hlc: Arc::new(Mutex::new(None)),
}
let hlc_arc = Arc::new(Mutex::new(hlc));
Ok(HlcService(hlc_arc))
}
/// Generates a new timestamp and immediately persists the HLC's new state.
/// This method MUST be called within an existing database transaction (`tx`)
/// along with the actual data operation that this timestamp is for.
/// This design ensures atomicity: the data is saved with its timestamp,
/// and the clock state is updated, or none of it is.
/// Factory function: creates and initializes a new HLC service from an existing DB connection.
/// This is the preferred way to instantiate the service.
pub fn new_from_connection(conn: &mut Connection) -> Result<Self, HlcError> {
// 1. Get or create a persistent node ID
let node_id = Self::get_or_create_node_id(conn)?;
// 2. Create an HLC instance with a stable identity
let hlc = HLCBuilder::new()
.with_id(node_id)
.with_max_delta(Duration::from_secs(1))
.build();
// 3. Load and apply the last persisted timestamp
if let Some(last_timestamp) = Self::load_last_timestamp(conn)? {
hlc.update_with_timestamp(&last_timestamp).map_err(|e| {
HlcError::Parse(format!(
"Failed to update HLC with persisted timestamp: {:?}",
e
))
})?;
}
Ok(HlcService {
hlc: Arc::new(Mutex::new(Some(hlc))),
})
}
/* /// Initializes the HLC service with data from the database.
/// This should be called once after the database connection is available.
pub fn initialize(&self, conn: &mut Connection) -> Result<(), HlcError> {
let mut initialized = self
.initialized
.lock()
.map_err(|_| HlcError::MutexPoisoned)?;
if *initialized {
return Ok(()); // Already initialized
}
let mut hlc_guard = self.hlc.lock().map_err(|_| HlcError::MutexPoisoned)?;
// 1. Get or create persistent node ID
let node_id = Self::get_or_create_node_id(conn)?;
// 2. Create HLC instance with stable identity
let hlc = HLCBuilder::new()
.with_id(node_id)
.with_max_delta(Duration::from_secs(1))
.build();
// 3. Load and apply last persisted timestamp
if let Some(last_timestamp) = Self::load_last_timestamp(conn)? {
hlc.update_with_timestamp(&last_timestamp).map_err(|e| {
HlcError::Parse(format!(
"Failed to update HLC with persisted timestamp: {:?}",
e
))
})?;
}
*hlc_guard = Some(hlc);
*initialized = true;
Ok(())
} */
/* /// Ensures the HLC service is initialized, calling initialize if needed.
pub fn ensure_initialized(&self, conn: &mut Connection) -> Result<(), HlcError> {
let initialized = self
.initialized
.lock()
.map_err(|_| HlcError::MutexPoisoned)?;
if !*initialized {
drop(initialized); // Release lock before calling initialize
self.initialize(conn)?;
}
Ok(())
} */
/* /// Checks if the service is initialized without requiring a database connection.
pub fn is_initialized(&self) -> Result<bool, HlcError> {
let initialized = self
.initialized
.lock()
.map_err(|_| HlcError::MutexPoisoned)?;
Ok(*initialized)
} */
/// Generates a new timestamp and immediately persists the HLC's new state.
/// Must be called within an existing database transaction.
pub fn new_timestamp_and_persist<'tx>(
&self,
tx: &Transaction<'tx>,
) -> Result<Timestamp, HlcError> {
let hlc = self.0.lock().map_err(|_| HlcError::MutexPoisoned)?;
let new_timestamp = hlc.new_timestamp();
let timestamp_str = new_timestamp.to_string();
let mut hlc_guard = self.hlc.lock().map_err(|_| HlcError::MutexPoisoned)?;
let hlc = hlc_guard.as_mut().ok_or(HlcError::NotInitialized)?;
let new_timestamp = hlc.new_timestamp();
Self::persist_timestamp(tx, &new_timestamp)?;
Ok(new_timestamp)
}
/// Creates a new timestamp without persisting it (e.g. for read operations).
pub fn new_timestamp(&self) -> Result<Timestamp, HlcError> {
let mut hlc_guard = self.hlc.lock().map_err(|_| HlcError::MutexPoisoned)?;
let hlc = hlc_guard.as_mut().ok_or(HlcError::NotInitialized)?;
Ok(hlc.new_timestamp())
}
/// Updates the HLC with an external timestamp (used during synchronization).
pub fn update_with_timestamp(&self, timestamp: &Timestamp) -> Result<(), HlcError> {
let mut hlc_guard = self.hlc.lock().map_err(|_| HlcError::MutexPoisoned)?;
let hlc = hlc_guard.as_mut().ok_or(HlcError::NotInitialized)?;
hlc.update_with_timestamp(timestamp)
.map_err(|e| HlcError::Parse(format!("Failed to update HLC: {:?}", e)))
}
/// Loads the last persisted timestamp from the database.
fn load_last_timestamp(conn: &Connection) -> Result<Option<Timestamp>, HlcError> {
let query = format!("SELECT value FROM {} WHERE key = ?1", TABLE_CRDT_CONFIGS);
match conn.query_row(&query, params![HLC_TIMESTAMP_TYPE], |row| {
row.get::<_, String>(0)
}) {
Ok(state_str) => {
let timestamp = Timestamp::from_str(&state_str).map_err(|e| {
HlcError::ParseTimestamp(format!("Invalid timestamp format: {:?}", e))
})?;
Ok(Some(timestamp))
}
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(HlcError::Database(e)),
}
}
/// Persists a timestamp to the database within a transaction.
fn persist_timestamp(tx: &Transaction, timestamp: &Timestamp) -> Result<(), HlcError> {
let timestamp_str = timestamp.to_string();
tx.execute(
&format!(
"INSERT INTO {} (key, value) VALUES (?1,?2)
"INSERT INTO {} (key, value) VALUES (?1, ?2)
ON CONFLICT(key) DO UPDATE SET value = excluded.value",
TABLE_CRDT_CONFIGS
),
params![HLC_TIMESTAMP_TYPE, timestamp_str],
)?;
Ok(new_timestamp)
Ok(())
}
/// Retrieves or creates and persists a stable node ID for the HLC.
/// Gets or creates and persists a stable node ID for the HLC.
fn get_or_create_node_id(conn: &mut Connection) -> Result<ID, HlcError> {
let tx = conn.transaction_with_behavior(rusqlite::TransactionBehavior::Immediate)?;
let query = format!("SELECT value FROM {} WHERE key = ?1", TABLE_CRDT_CONFIGS);
let query = format!("SELECT value FROM {} WHERE key =?1", TABLE_CRDT_CONFIGS);
match tx.query_row(&query, params![HLC_NODE_ID_TYPE], |row| {
row.get::<_, String>(0)
let id = match tx.query_row(&query, params![HLC_NODE_ID_TYPE], |row| {
row.get::<_, Vec<u8>>(0)
}) {
Ok(id_str) => {
// ID exists, parse and return it.
let id_bytes = hex::decode(id_str).map_err(|e| HlcError::Parse(e.to_string()))?;
let id = ID::try_from(id_bytes.as_slice())?;
tx.commit()?;
Ok(id)
}
Ok(id_bytes) => ID::try_from(id_bytes.as_slice())
.map_err(|e| HlcError::Parse(format!("Invalid node ID format: {:?}", e)))?,
Err(rusqlite::Error::QueryReturnedNoRows) => {
// No ID found, create, persist, and return a new one.
let new_id_bytes = Uuid::new_v4().as_bytes().to_vec();
let new_id = ID::try_from(new_id_bytes.as_slice())?;
let new_id_str = hex::encode(new_id.to_le_bytes());
tx.execute(
&format!(
"INSERT INTO {} (key, value) VALUES (?1, ?2)",
TABLE_CRDT_CONFIGS
),
params![HLC_NODE_ID_TYPE, new_id_str],
params![HLC_NODE_ID_TYPE, new_id_bytes.as_slice()],
)?;
tx.commit()?;
Ok(new_id)
new_id
}
Err(e) => Err(HlcError::from(e)),
}
Err(e) => return Err(HlcError::Database(e)),
};
tx.commit()?;
Ok(id)
}
}
impl Default for HlcService {
fn default() -> Self {
Self::new()
}
}
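
For orientation, a minimal usage sketch of the new API (editor's illustration, not part of this commit); the haex_notes table, the note column names, and the error conversion to Box<dyn Error> are assumptions, while the module path and haex_hlc_timestamp column follow the code in this diff:

use rusqlite::{params, Connection};
use crate::crdt::hlc::HlcService;

fn write_note(conn: &mut Connection, hlc: &HlcService) -> Result<(), Box<dyn std::error::Error>> {
    let tx = conn.transaction()?;
    // The clock state and the data change commit (or roll back) together.
    let ts = hlc.new_timestamp_and_persist(&tx)?;
    tx.execute(
        "UPDATE haex_notes SET title = ?1, haex_hlc_timestamp = ?2 WHERE id = ?3",
        params!["new title", ts.to_string(), 1],
    )?;
    tx.commit()?;
    Ok(())
}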

View File

@ -1,3 +1,3 @@
pub mod hlc;
pub mod proxy;
pub mod transformer;
pub mod trigger;

View File

@ -1,416 +0,0 @@
// In src-tauri/src/crdt/proxy.rs
use crate::crdt::hlc::HlcService;
use crate::crdt::trigger::{HLC_TIMESTAMP_COLUMN, TOMBSTONE_COLUMN};
use crate::table_names::{TABLE_CRDT_CONFIGS, TABLE_CRDT_LOGS};
use rusqlite::Connection;
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use sqlparser::ast::{
Assignment, AssignmentTarget, BinaryOperator, ColumnDef, DataType, Expr, Ident, Insert,
ObjectName, ObjectNamePart, SelectItem, SetExpr, Statement, TableFactor, TableObject,
TableWithJoins, UpdateTableFromKind, Value, ValueWithSpan,
};
use sqlparser::dialect::SQLiteDialect;
use sqlparser::parser::Parser;
use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use tauri::{path::BaseDirectory, AppHandle, Manager, State};
use ts_rs::TS;
use uhlc::Timestamp;
pub struct DbConnection(pub Arc<Mutex<Option<Connection>>>);
#[derive(Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(tag = "type", content = "details")]
pub enum ProxyError {
/// The SQL code could not be parsed.
ParseError {
reason: String,
},
/// An error occurred during execution in the database.
ExecutionError {
sql: String,
reason: String,
},
/// An error occurred while managing the transaction.
TransactionError {
reason: String,
},
/// A SQL statement is not supported by the proxy (e.g. DELETE from a subquery).
UnsupportedStatement {
description: String,
},
HlcError {
reason: String,
},
}
// Tables that are excluded from the proxy logic.
const EXCLUDED_TABLES: &[&str] = &[TABLE_CRDT_CONFIGS, TABLE_CRDT_LOGS];
pub struct SqlProxy;
impl SqlProxy {
pub fn new() -> Self {
Self {}
}
/// Executes SQL statements after transforming them for CRDT conformance.
pub fn execute(
&self,
sql: &str,
params: Vec<JsonValue>,
state: State<'_, DbConnection>,
hlc_service: &HlcService,
) -> Result<Vec<String>, ProxyError> {
let dialect = SQLiteDialect {};
let mut ast_vec = Parser::parse_sql(&dialect, sql).map_err(|e| ProxyError::ParseError {
reason: e.to_string(),
})?;
let mut modified_schema_tables = HashSet::new();
let db_lock = state
.0
.lock()
.map_err(|e| format!("Mutex Lock Fehler: {}", e))?;
let conn = db_lock.as_ref().ok_or("Keine Datenbankverbindung")?;
let tx = conn
.transaction()
.map_err(|e| ProxyError::TransactionError {
reason: e.to_string(),
})?;
/* let hlc_timestamp =
hlc_service
.new_timestamp_and_persist(&tx)
.map_err(|e| ProxyError::HlcError {
reason: e.to_string(),
})?; */
for statement in &mut ast_vec {
if let Some(table_name) = self.transform_statement(statement)? {
modified_schema_tables.insert(table_name);
}
}
for statement in ast_vec {
let final_sql = statement.to_string();
tx.execute(&final_sql, [])
.map_err(|e| ProxyError::ExecutionError {
sql: final_sql,
reason: e.to_string(),
})?;
}
tx.commit().map_err(|e| ProxyError::TransactionError {
reason: e.to_string(),
})?;
Ok(modified_schema_tables.into_iter().collect())
}
/// Applies the transformation to a single statement.
fn transform_statement(&self, stmt: &mut Statement) -> Result<Option<String>, ProxyError> {
match stmt {
Statement::Query(query) => {
if let SetExpr::Select(select) = &mut *query.body {
let mut tombstone_filters = Vec::new();
for twj in &select.from {
if let TableFactor::Table { name, alias, .. } = &twj.relation {
if self.is_audited_table(name) {
let table_idents = if let Some(a) = alias {
vec![a.name.clone()]
} else {
name.0
.iter()
.filter_map(|part| match part {
ObjectNamePart::Identifier(id) => Some(id.clone()),
_ => None,
})
.collect::<Vec<_>>()
};
let column_ident = Ident::new(TOMBSTONE_COLUMN);
let full_ident = [table_idents, vec![column_ident]].concat();
let filter = Expr::BinaryOp {
left: Box::new(Expr::CompoundIdentifier(full_ident)),
op: BinaryOperator::Eq,
right: Box::new(Expr::Value(
sqlparser::ast::Value::Number("1".to_string(), false)
.into(),
)),
};
tombstone_filters.push(filter);
}
}
}
if !tombstone_filters.is_empty() {
let combined_filter = tombstone_filters
.into_iter()
.reduce(|acc, expr| Expr::BinaryOp {
left: Box::new(acc),
op: BinaryOperator::And,
right: Box::new(expr),
})
.unwrap();
match &mut select.selection {
Some(existing) => {
*existing = Expr::BinaryOp {
left: Box::new(existing.clone()),
op: BinaryOperator::And,
right: Box::new(combined_filter),
};
}
None => {
select.selection = Some(combined_filter);
}
}
}
}
// TODO: UNION, EXCEPT etc. are not handled here
}
Statement::CreateTable(create_table) => {
if self.is_audited_table(&create_table.name) {
self.add_crdt_columns(&mut create_table.columns);
return Ok(Some(
create_table
.name
.to_string()
.trim_matches('`')
.trim_matches('"')
.to_string(),
));
}
}
Statement::Insert(insert_stmt) => {
if let TableObject::TableName(name) = &insert_stmt.table {
if self.is_audited_table(name) {
self.add_hlc_to_insert(insert_stmt);
}
}
}
Statement::Update {
table,
assignments,
from,
selection,
returning,
or,
} => {
if let TableFactor::Table { name, .. } = &table.relation {
if self.is_audited_table(&name) {
if let Some(ts) = hlc_timestamp {
assignments.push(self.create_hlc_assignment(ts));
}
}
}
*stmt = Statement::Update {
table: table.clone(),
assignments: assignments.clone(),
from: from.clone(),
selection: selection.clone(),
returning: returning.clone(),
or: *or,
};
}
Statement::Delete(del_stmt) => {
let table_name = self.extract_table_name_from_from(&del_stmt.from);
if let Some(name) = table_name {
if self.is_audited_table(&name) {
// CHANGED: passes the timestamp to the transformation function
self.transform_delete_to_update(stmt);
}
} else {
return Err(ProxyError::UnsupportedStatement {
description: "DELETE from non-table source or multiple tables".to_string(),
});
}
}
Statement::AlterTable { name, .. } => {
if self.is_audited_table(name) {
return Ok(Some(
name.to_string()
.trim_matches('`')
.trim_matches('"')
.to_string(),
));
}
}
_ => {}
}
Ok(None)
}
/// Adds the tombstone column to a list of column definitions.
fn add_tombstone_column(&self, columns: &mut Vec<ColumnDef>) {
if !columns
.iter()
.any(|c| c.name.value.to_lowercase() == TOMBSTONE_COLUMN)
{
columns.push(ColumnDef {
name: Ident::new(TOMBSTONE_COLUMN),
data_type: DataType::Integer(None),
options: vec![],
});
}
}
/// Checks whether a table should be covered by the proxy logic.
fn is_audited_table(&self, name: &ObjectName) -> bool {
let table_name = name.to_string().to_lowercase();
let table_name = table_name.trim_matches('`').trim_matches('"');
!EXCLUDED_TABLES.contains(&table_name)
}
fn extract_table_name_from_from(&self, from: &sqlparser::ast::FromTable) -> Option<ObjectName> {
let tables = match from {
sqlparser::ast::FromTable::WithFromKeyword(from)
| sqlparser::ast::FromTable::WithoutKeyword(from) => from,
};
if tables.len() == 1 {
if let TableFactor::Table { name, .. } = &tables[0].relation {
Some(name.clone())
} else {
None
}
} else {
None
}
}
fn extract_table_name(&self, from: &[TableWithJoins]) -> Option<ObjectName> {
if from.len() == 1 {
if let TableFactor::Table { name, .. } = &from[0].relation {
Some(name.clone())
} else {
None
}
} else {
None
}
}
fn create_tombstone_assignment(&self) -> Assignment {
Assignment {
target: AssignmentTarget::ColumnName(ObjectName(vec![ObjectNamePart::Identifier(
Ident::new(TOMBSTONE_COLUMN),
)])),
value: Expr::Value(sqlparser::ast::Value::Number("1".to_string(), false).into()),
}
}
fn add_tombstone_filter(&self, selection: &mut Option<Expr>) {
let tombstone_expr = Expr::BinaryOp {
left: Box::new(Expr::Identifier(Ident::new(TOMBSTONE_COLUMN))),
op: BinaryOperator::Eq,
// THIS IS THE FINAL FIX:
right: Box::new(Expr::Value(Value::Number("0".to_string(), false).into())),
};
match selection {
Some(existing) => {
// Combine with AND if a WHERE clause already exists
*selection = Some(Expr::BinaryOp {
left: Box::new(existing.clone()),
op: BinaryOperator::And,
right: Box::new(tombstone_expr),
});
}
None => {
// Set a new WHERE clause if none exists
*selection = Some(tombstone_expr);
}
}
}
fn add_crdt_columns(&self, columns: &mut Vec<ColumnDef>) {
if !columns.iter().any(|c| c.name.value == TOMBSTONE_COLUMN) {
columns.push(ColumnDef {
name: Ident::new(TOMBSTONE_COLUMN),
data_type: DataType::Integer(None),
options: vec![],
});
}
if !columns.iter().any(|c| c.name.value == HLC_TIMESTAMP_COLUMN) {
columns.push(ColumnDef {
name: Ident::new(HLC_TIMESTAMP_COLUMN),
data_type: DataType::String(None),
options: vec![],
});
}
}
fn transform_delete_to_update(&self, stmt: &mut Statement) {
if let Statement::Delete(del_stmt) = stmt {
let table_to_update = match &del_stmt.from {
sqlparser::ast::FromTable::WithFromKeyword(from)
| sqlparser::ast::FromTable::WithoutKeyword(from) => from[0].clone(),
};
let assignments = vec![self.create_tombstone_assignment()];
*stmt = Statement::Update {
table: table_to_update,
assignments,
from: None,
selection: del_stmt.selection.clone(),
returning: None,
or: None,
};
}
}
fn add_hlc_to_insert(
&self,
insert_stmt: &mut sqlparser::ast::Insert,
ts: &Timestamp,
) -> Result<(), ProxyError> {
insert_stmt.columns.push(Ident::new(HLC_TIMESTAMP_COLUMN));
match insert_stmt.source.as_mut() {
Some(query) => match &mut *query.body {
// Dereference the Box with *
SetExpr::Values(values) => {
for row in &mut values.rows {
row.push(Expr::Value(
Value::SingleQuotedString(ts.to_string()).into(),
));
}
}
SetExpr::Select(select) => {
let hlc_expr = Expr::Value(Value::SingleQuotedString(ts.to_string()).into());
select.projection.push(SelectItem::UnnamedExpr(hlc_expr));
}
_ => {
return Err(ProxyError::UnsupportedStatement {
description: "INSERT with unsupported source".to_string(),
});
}
},
None => {
return Err(ProxyError::UnsupportedStatement {
description: "INSERT statement has no source".to_string(),
});
}
}
Ok(())
}
/// Creates an assignment `haex_modified_hlc = '...'`
// NEW: helper function
fn create_hlc_assignment(&self, ts: &Timestamp) -> Assignment {
Assignment {
target: AssignmentTarget::ColumnName(ObjectName(vec![ObjectNamePart::Identifier(
Ident::new(HLC_TIMESTAMP_COLUMN),
)])),
value: Expr::Value(Value::SingleQuotedString(ts.to_string()).into()),
}
}
}

View File

@ -0,0 +1,787 @@
use crate::crdt::trigger::{HLC_TIMESTAMP_COLUMN, TOMBSTONE_COLUMN};
use crate::database::error::DatabaseError;
use crate::table_names::{TABLE_CRDT_CONFIGS, TABLE_CRDT_LOGS};
use sqlparser::ast::{
Assignment, AssignmentTarget, BinaryOperator, ColumnDef, DataType, Expr, Ident, Insert,
ObjectName, ObjectNamePart, SelectItem, SetExpr, Statement, TableFactor, TableObject, Value,
};
use std::borrow::Cow;
use std::collections::HashSet;
use uhlc::Timestamp;
/// Configuration for the CRDT columns
#[derive(Clone)]
struct CrdtColumns {
tombstone: &'static str,
hlc_timestamp: &'static str,
}
impl CrdtColumns {
const DEFAULT: Self = Self {
tombstone: TOMBSTONE_COLUMN,
hlc_timestamp: HLC_TIMESTAMP_COLUMN,
};
/// Creates a tombstone filter for a table
fn create_tombstone_filter(&self, table_alias: Option<&str>) -> Expr {
let column_expr = match table_alias {
Some(alias) => {
// Qualified reference: alias.tombstone
Expr::CompoundIdentifier(vec![Ident::new(alias), Ident::new(self.tombstone)])
}
None => {
// Simple reference: tombstone
Expr::Identifier(Ident::new(self.tombstone))
}
};
Expr::BinaryOp {
left: Box::new(column_expr),
op: BinaryOperator::NotEq,
right: Box::new(Expr::Value(Value::Number("1".to_string(), false).into())),
}
}
/// Creates a tombstone assignment for UPDATE/DELETE
fn create_tombstone_assignment(&self) -> Assignment {
Assignment {
target: AssignmentTarget::ColumnName(ObjectName(vec![ObjectNamePart::Identifier(
Ident::new(self.tombstone),
)])),
value: Expr::Value(Value::Number("1".to_string(), false).into()),
}
}
/// Creates an HLC assignment for UPDATE/DELETE
fn create_hlc_assignment(&self, timestamp: &Timestamp) -> Assignment {
Assignment {
target: AssignmentTarget::ColumnName(ObjectName(vec![ObjectNamePart::Identifier(
Ident::new(self.hlc_timestamp),
)])),
value: Expr::Value(Value::SingleQuotedString(timestamp.to_string()).into()),
}
}
/// Adds the CRDT columns to a table definition
fn add_to_table_definition(&self, columns: &mut Vec<ColumnDef>) {
if !columns.iter().any(|c| c.name.value == self.tombstone) {
columns.push(ColumnDef {
name: Ident::new(self.tombstone),
data_type: DataType::Integer(None),
options: vec![],
});
}
if !columns.iter().any(|c| c.name.value == self.hlc_timestamp) {
columns.push(ColumnDef {
name: Ident::new(self.hlc_timestamp),
data_type: DataType::String(None),
options: vec![],
});
}
}
}
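
As a small illustration (editor's sketch, not part of the file): with the existing column constants, the filter built above is a sqlparser AST node whose Display output for a table aliased as `n` looks roughly like `n.haex_tombstone <> 1`:

// module-internal sketch, since CrdtColumns is private
#[cfg(test)]
mod tombstone_filter_sketch {
    use super::*;

    #[test]
    fn renders_qualified_filter() {
        let filter = CrdtColumns::DEFAULT.create_tombstone_filter(Some("n"));
        // Display output looks roughly like: n.haex_tombstone <> 1
        println!("{filter}");
    }
}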
pub struct CrdtTransformer {
columns: CrdtColumns,
excluded_tables: HashSet<&'static str>,
}
impl CrdtTransformer {
pub fn new() -> Self {
let mut excluded_tables = HashSet::new();
excluded_tables.insert(TABLE_CRDT_CONFIGS);
excluded_tables.insert(TABLE_CRDT_LOGS);
Self {
columns: CrdtColumns::DEFAULT,
excluded_tables,
}
}
/// Checks whether a table should take part in CRDT synchronization
fn is_crdt_sync_table(&self, name: &ObjectName) -> bool {
let table_name = self.normalize_table_name(name);
!self.excluded_tables.contains(table_name.as_ref())
}
/// Normalizes table names (strips quoting)
fn normalize_table_name(&self, name: &ObjectName) -> Cow<str> {
let name_str = name.to_string().to_lowercase();
Cow::Owned(name_str.trim_matches('`').trim_matches('"').to_string())
}
pub fn transform_select_statement(&self, stmt: &mut Statement) -> Result<(), DatabaseError> {
match stmt {
Statement::Query(query) => self.transform_query_recursive(query),
// Catch all other cases and return an error
_ => Err(DatabaseError::UnsupportedStatement {
statement_type: format!("{:?}", stmt)
.split('(')
.next()
.unwrap_or("")
.to_string(),
description: "This operation only accepts SELECT statements.".to_string(),
}),
}
}
pub fn transform_execute_statement(
&self,
stmt: &mut Statement,
hlc_timestamp: &Timestamp,
) -> Result<Option<String>, DatabaseError> {
match stmt {
Statement::CreateTable(create_table) => {
if self.is_crdt_sync_table(&create_table.name) {
self.columns
.add_to_table_definition(&mut create_table.columns);
Ok(Some(
self.normalize_table_name(&create_table.name).into_owned(),
))
} else {
Ok(None)
}
}
Statement::Insert(insert_stmt) => {
if let TableObject::TableName(name) = &insert_stmt.table {
if self.is_crdt_sync_table(name) {
self.transform_insert(insert_stmt, hlc_timestamp)?;
}
}
Ok(None)
}
Statement::Update {
table, assignments, ..
} => {
if let TableFactor::Table { name, .. } = &table.relation {
if self.is_crdt_sync_table(name) {
assignments.push(self.columns.create_hlc_assignment(hlc_timestamp));
}
}
Ok(None)
}
Statement::Delete(del_stmt) => {
if let Some(table_name) = self.extract_table_name_from_delete(del_stmt) {
if self.is_crdt_sync_table(&table_name) {
self.transform_delete_to_update(stmt, hlc_timestamp)?;
}
Ok(None)
} else {
Err(DatabaseError::UnsupportedStatement {
statement_type: "DELETE".to_string(),
description: "DELETE from non-table source or multiple tables".to_string(),
})
}
}
Statement::AlterTable { name, .. } => {
if self.is_crdt_sync_table(name) {
Ok(Some(self.normalize_table_name(name).into_owned()))
} else {
Ok(None)
}
}
_ => Ok(None),
}
}
/// Transforms query statements (adds tombstone filters)
fn transform_query_recursive(
&self,
query: &mut sqlparser::ast::Query,
) -> Result<(), DatabaseError> {
self.add_tombstone_filters_recursive(&mut query.body)
}
/// Recursively handles all SetExpr variants with full subquery support
fn add_tombstone_filters_recursive(&self, set_expr: &mut SetExpr) -> Result<(), DatabaseError> {
match set_expr {
SetExpr::Select(select) => {
self.add_tombstone_filters_to_select(select)?;
// Also transform subqueries in projections
for projection in &mut select.projection {
match projection {
SelectItem::UnnamedExpr(expr) | SelectItem::ExprWithAlias { expr, .. } => {
self.transform_expression_subqueries(expr)?;
}
_ => {} // Ignore wildcard projections
}
}
// Transform subqueries in WHERE
if let Some(where_clause) = &mut select.selection {
self.transform_expression_subqueries(where_clause)?;
}
// Transform subqueries in GROUP BY
match &mut select.group_by {
sqlparser::ast::GroupByExpr::All(_) => {
// GROUP BY ALL - no expressions to transform
}
sqlparser::ast::GroupByExpr::Expressions(exprs, _) => {
for group_expr in exprs {
self.transform_expression_subqueries(group_expr)?;
}
}
}
// Transform subqueries in HAVING
if let Some(having) = &mut select.having {
self.transform_expression_subqueries(having)?;
}
}
SetExpr::SetOperation { left, right, .. } => {
self.add_tombstone_filters_recursive(left)?;
self.add_tombstone_filters_recursive(right)?;
}
SetExpr::Query(query) => {
self.add_tombstone_filters_recursive(&mut query.body)?;
}
SetExpr::Values(values) => {
// Also transform subqueries in VALUES lists
for row in &mut values.rows {
for expr in row {
self.transform_expression_subqueries(expr)?;
}
}
}
_ => {} // Other cases
}
Ok(())
}
/// Transforms subqueries inside expressions
fn transform_expression_subqueries(&self, expr: &mut Expr) -> Result<(), DatabaseError> {
match expr {
// Plain subqueries
Expr::Subquery(query) => {
self.add_tombstone_filters_recursive(&mut query.body)?;
}
// EXISTS Subqueries
Expr::Exists { subquery, .. } => {
self.add_tombstone_filters_recursive(&mut subquery.body)?;
}
// IN Subqueries
Expr::InSubquery {
expr: left_expr,
subquery,
..
} => {
self.transform_expression_subqueries(left_expr)?;
self.add_tombstone_filters_recursive(&mut subquery.body)?;
}
// ANY/ALL Subqueries
Expr::AnyOp { left, right, .. } | Expr::AllOp { left, right, .. } => {
self.transform_expression_subqueries(left)?;
self.transform_expression_subqueries(right)?;
}
// Binary operations
Expr::BinaryOp { left, right, .. } => {
self.transform_expression_subqueries(left)?;
self.transform_expression_subqueries(right)?;
}
// Unary operations
Expr::UnaryOp {
expr: inner_expr, ..
} => {
self.transform_expression_subqueries(inner_expr)?;
}
// Nested expressions
Expr::Nested(nested) => {
self.transform_expression_subqueries(nested)?;
}
// CASE expressions
Expr::Case {
operand,
conditions,
else_result,
..
} => {
if let Some(op) = operand {
self.transform_expression_subqueries(op)?;
}
for case_when in conditions {
self.transform_expression_subqueries(&mut case_when.condition)?;
self.transform_expression_subqueries(&mut case_when.result)?;
}
if let Some(else_res) = else_result {
self.transform_expression_subqueries(else_res)?;
}
}
// Function calls
Expr::Function(func) => match &mut func.args {
sqlparser::ast::FunctionArguments::List(sqlparser::ast::FunctionArgumentList {
args,
..
}) => {
for arg in args {
if let sqlparser::ast::FunctionArg::Unnamed(
sqlparser::ast::FunctionArgExpr::Expr(expr),
) = arg
{
self.transform_expression_subqueries(expr)?;
}
}
}
_ => {}
},
// BETWEEN
Expr::Between {
expr: main_expr,
low,
high,
..
} => {
self.transform_expression_subqueries(main_expr)?;
self.transform_expression_subqueries(low)?;
self.transform_expression_subqueries(high)?;
}
// IN list
Expr::InList {
expr: main_expr,
list,
..
} => {
self.transform_expression_subqueries(main_expr)?;
for list_expr in list {
self.transform_expression_subqueries(list_expr)?;
}
}
// IS NULL/IS NOT NULL
Expr::IsNull(inner) | Expr::IsNotNull(inner) => {
self.transform_expression_subqueries(inner)?;
}
// Other expression types need no transformation
_ => {}
}
Ok(())
}
/// Adds tombstone filters to SELECT statements (only if not already set explicitly in WHERE)
fn add_tombstone_filters_to_select(
&self,
select: &mut sqlparser::ast::Select,
) -> Result<(), DatabaseError> {
// Collect all CRDT tables together with their aliases
let mut crdt_tables = Vec::new();
for twj in &select.from {
if let TableFactor::Table { name, alias, .. } = &twj.relation {
if self.is_crdt_sync_table(name) {
let table_alias = alias.as_ref().map(|a| a.name.value.as_str());
crdt_tables.push((name.clone(), table_alias));
}
}
}
if crdt_tables.is_empty() {
return Ok(());
}
// Check which tombstone columns are already referenced in the WHERE clause
let explicitly_filtered_tables = if let Some(where_clause) = &select.selection {
self.find_explicitly_filtered_tombstone_tables(where_clause, &crdt_tables)
} else {
HashSet::new()
};
// Create filters only for tables that are not filtered explicitly yet
let mut tombstone_filters = Vec::new();
for (table_name, table_alias) in crdt_tables {
let table_name_string = table_name.to_string();
let table_key = table_alias.unwrap_or(&table_name_string);
if !explicitly_filtered_tables.contains(table_key) {
tombstone_filters.push(self.columns.create_tombstone_filter(table_alias));
}
}
// Add the automatic filters
if !tombstone_filters.is_empty() {
let combined_filter = tombstone_filters
.into_iter()
.reduce(|acc, expr| Expr::BinaryOp {
left: Box::new(acc),
op: BinaryOperator::And,
right: Box::new(expr),
})
.unwrap();
match &mut select.selection {
Some(existing) => {
*existing = Expr::BinaryOp {
left: Box::new(existing.clone()),
op: BinaryOperator::And,
right: Box::new(combined_filter),
};
}
None => {
select.selection = Some(combined_filter);
}
}
}
Ok(())
}
/// Finds all tables that already have explicit tombstone filters in the WHERE clause
fn find_explicitly_filtered_tombstone_tables(
&self,
where_expr: &Expr,
crdt_tables: &[(ObjectName, Option<&str>)],
) -> HashSet<String> {
let mut filtered_tables = HashSet::new();
self.scan_expression_for_tombstone_references(
where_expr,
crdt_tables,
&mut filtered_tables,
);
filtered_tables
}
/// Recursively scans an expression tree for tombstone column references
fn scan_expression_for_tombstone_references(
&self,
expr: &Expr,
crdt_tables: &[(ObjectName, Option<&str>)],
filtered_tables: &mut HashSet<String>,
) {
match expr {
// Simple column reference: tombstone = ?
Expr::Identifier(ident) => {
if ident.value == self.columns.tombstone {
// If no table is specified and there is only one CRDT table
if crdt_tables.len() == 1 {
let table_name_str = crdt_tables[0].0.to_string();
let table_key = crdt_tables[0].1.unwrap_or(&table_name_str);
filtered_tables.insert(table_key.to_string());
}
}
}
// Qualified column reference: table.tombstone = ? or alias.tombstone = ?
Expr::CompoundIdentifier(idents) => {
if idents.len() == 2 && idents[1].value == self.columns.tombstone {
let table_ref = &idents[0].value;
// Check whether it is one of our CRDT tables (by name or alias)
for (table_name, alias) in crdt_tables {
let table_name_str = table_name.to_string();
if table_ref == &table_name_str || alias.map_or(false, |a| a == table_ref) {
filtered_tables.insert(table_ref.clone());
break;
}
}
}
}
// Binary operations: AND, OR, etc.
Expr::BinaryOp { left, right, .. } => {
self.scan_expression_for_tombstone_references(left, crdt_tables, filtered_tables);
self.scan_expression_for_tombstone_references(right, crdt_tables, filtered_tables);
}
// Unary operations: NOT, etc.
Expr::UnaryOp { expr, .. } => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
}
// Nested expressions
Expr::Nested(nested) => {
self.scan_expression_for_tombstone_references(nested, crdt_tables, filtered_tables);
}
// IN clauses
Expr::InList { expr, .. } => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
}
// BETWEEN clauses
Expr::Between { expr, .. } => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
}
// IS NULL/IS NOT NULL
Expr::IsNull(expr) | Expr::IsNotNull(expr) => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
}
// Function calls - FIXED
Expr::Function(func) => {
match &func.args {
sqlparser::ast::FunctionArguments::List(
sqlparser::ast::FunctionArgumentList { args, .. },
) => {
for arg in args {
if let sqlparser::ast::FunctionArg::Unnamed(
sqlparser::ast::FunctionArgExpr::Expr(expr),
) = arg
{
self.scan_expression_for_tombstone_references(
expr,
crdt_tables,
filtered_tables,
);
}
}
}
_ => {} // Ignore other FunctionArguments variants
}
}
// CASE expressions - FIXED
Expr::Case {
operand,
conditions,
else_result,
..
} => {
if let Some(op) = operand {
self.scan_expression_for_tombstone_references(op, crdt_tables, filtered_tables);
}
for case_when in conditions {
self.scan_expression_for_tombstone_references(
&case_when.condition,
crdt_tables,
filtered_tables,
);
self.scan_expression_for_tombstone_references(
&case_when.result,
crdt_tables,
filtered_tables,
);
}
if let Some(else_res) = else_result {
self.scan_expression_for_tombstone_references(
else_res,
crdt_tables,
filtered_tables,
);
}
}
// Subqueries with full support
Expr::Subquery(query) => {
self.transform_query_recursive_for_tombstone_analysis(
query,
crdt_tables,
filtered_tables,
)
.ok();
}
// EXISTS/NOT EXISTS Subqueries
Expr::Exists { subquery, .. } => {
self.transform_query_recursive_for_tombstone_analysis(
subquery,
crdt_tables,
filtered_tables,
)
.ok();
}
// IN/NOT IN Subqueries
Expr::InSubquery { expr, subquery, .. } => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
self.transform_query_recursive_for_tombstone_analysis(
subquery,
crdt_tables,
filtered_tables,
)
.ok();
}
// ANY/ALL Subqueries
Expr::AnyOp { left, right, .. } | Expr::AllOp { left, right, .. } => {
self.scan_expression_for_tombstone_references(left, crdt_tables, filtered_tables);
self.scan_expression_for_tombstone_references(right, crdt_tables, filtered_tables);
}
// Ignore other expression types for now
_ => {}
}
}
/// Analyzes a subquery and collects tombstone references
fn transform_query_recursive_for_tombstone_analysis(
&self,
query: &sqlparser::ast::Query,
crdt_tables: &[(ObjectName, Option<&str>)],
filtered_tables: &mut HashSet<String>,
) -> Result<(), DatabaseError> {
self.analyze_set_expr_for_tombstone_references(&query.body, crdt_tables, filtered_tables)
}
/// Recursively analyzes a SetExpr for tombstone references
fn analyze_set_expr_for_tombstone_references(
&self,
set_expr: &SetExpr,
crdt_tables: &[(ObjectName, Option<&str>)],
filtered_tables: &mut HashSet<String>,
) -> Result<(), DatabaseError> {
match set_expr {
SetExpr::Select(select) => {
// Analyze the WHERE clause
if let Some(where_clause) = &select.selection {
self.scan_expression_for_tombstone_references(
where_clause,
crdt_tables,
filtered_tables,
);
}
// Analyze all projections (they can contain subqueries too)
for projection in &select.projection {
match projection {
SelectItem::UnnamedExpr(expr) | SelectItem::ExprWithAlias { expr, .. } => {
self.scan_expression_for_tombstone_references(
expr,
crdt_tables,
filtered_tables,
);
}
_ => {} // Ignore wildcard projections
}
}
// Analyze GROUP BY
match &select.group_by {
sqlparser::ast::GroupByExpr::All(_) => {
// GROUP BY ALL - no expressions to analyze
}
sqlparser::ast::GroupByExpr::Expressions(exprs, _) => {
for group_expr in exprs {
self.scan_expression_for_tombstone_references(
group_expr,
crdt_tables,
filtered_tables,
);
}
}
}
// Analyze HAVING
if let Some(having) = &select.having {
self.scan_expression_for_tombstone_references(
having,
crdt_tables,
filtered_tables,
);
}
}
SetExpr::SetOperation { left, right, .. } => {
self.analyze_set_expr_for_tombstone_references(left, crdt_tables, filtered_tables)?;
self.analyze_set_expr_for_tombstone_references(
right,
crdt_tables,
filtered_tables,
)?;
}
SetExpr::Query(query) => {
self.analyze_set_expr_for_tombstone_references(
&query.body,
crdt_tables,
filtered_tables,
)?;
}
SetExpr::Values(values) => {
// Analyze VALUES lists
for row in &values.rows {
for expr in row {
self.scan_expression_for_tombstone_references(
expr,
crdt_tables,
filtered_tables,
);
}
}
}
_ => {} // Other variants
}
Ok(())
}
/// Transforms INSERT statements (adds the HLC timestamp)
fn transform_insert(
&self,
insert_stmt: &mut Insert,
timestamp: &Timestamp,
) -> Result<(), DatabaseError> {
insert_stmt
.columns
.push(Ident::new(self.columns.hlc_timestamp));
match insert_stmt.source.as_mut() {
Some(query) => match &mut *query.body {
SetExpr::Values(values) => {
for row in &mut values.rows {
row.push(Expr::Value(
Value::SingleQuotedString(timestamp.to_string()).into(),
));
}
}
SetExpr::Select(select) => {
let hlc_expr =
Expr::Value(Value::SingleQuotedString(timestamp.to_string()).into());
select.projection.push(SelectItem::UnnamedExpr(hlc_expr));
}
_ => {
return Err(DatabaseError::UnsupportedStatement {
statement_type: "INSERT".to_string(),
description: "INSERT with unsupported source type".to_string(),
});
}
},
None => {
return Err(DatabaseError::UnsupportedStatement {
statement_type: "INSERT".to_string(),
description: "INSERT statement has no source".to_string(),
});
}
}
Ok(())
}
/// Transforms DELETE into UPDATE (soft delete)
fn transform_delete_to_update(
&self,
stmt: &mut Statement,
timestamp: &Timestamp,
) -> Result<(), DatabaseError> {
if let Statement::Delete(del_stmt) = stmt {
let table_to_update = match &del_stmt.from {
sqlparser::ast::FromTable::WithFromKeyword(from)
| sqlparser::ast::FromTable::WithoutKeyword(from) => {
if from.len() == 1 {
from[0].clone()
} else {
return Err(DatabaseError::UnsupportedStatement {
statement_type: "DELETE".to_string(),
description: "DELETE with multiple tables not supported".to_string(),
});
}
}
};
let assignments = vec![
self.columns.create_tombstone_assignment(),
self.columns.create_hlc_assignment(timestamp),
];
*stmt = Statement::Update {
table: table_to_update,
assignments,
from: None,
selection: del_stmt.selection.clone(),
returning: None,
or: None,
};
}
Ok(())
}
/// Extracts the table name from a DELETE statement
fn extract_table_name_from_delete(
&self,
del_stmt: &sqlparser::ast::Delete,
) -> Option<ObjectName> {
let tables = match &del_stmt.from {
sqlparser::ast::FromTable::WithFromKeyword(from)
| sqlparser::ast::FromTable::WithoutKeyword(from) => from,
};
if tables.len() == 1 {
if let TableFactor::Table { name, .. } = &tables[0].relation {
Some(name.clone())
} else {
None
}
} else {
None
}
}
}
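
A usage sketch of the transformer (editor's illustration, not part of this commit); the table name haex_notes and the module path are assumptions, and the rewritten SQL shown in the comment is approximate:

use sqlparser::{dialect::SQLiteDialect, parser::Parser};
use uhlc::HLC;
use crate::crdt::transformer::CrdtTransformer; // assumed module path

fn soft_delete_example() {
    let transformer = CrdtTransformer::new();
    let ts = HLC::default().new_timestamp();
    let mut stmts = Parser::parse_sql(&SQLiteDialect {}, "DELETE FROM haex_notes WHERE id = 1")
        .expect("parse");
    transformer
        .transform_execute_statement(&mut stmts[0], &ts)
        .expect("transform");
    // Renders roughly as:
    // UPDATE haex_notes SET haex_tombstone = 1, haex_hlc_timestamp = '<hlc ts>' WHERE id = 1
    println!("{}", stmts[0]);
}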

View File

@ -77,53 +77,6 @@ pub enum TriggerSetupResult {
TableNotFound,
}
/* fn set_sync_active(conn: &mut Connection) -> RusqliteResult<()> {
let sql = format!(
"INSERT OR REPLACE INTO \"{meta_table}\" (key, value) VALUES (?, '1');",
meta_table = TABLE_CRDT_CONFIGS
);
conn.execute(&sql, [SYNC_ACTIVE_KEY])?;
Ok(())
} */
/* fn clear_sync_active(conn: &mut Connection) -> RusqliteResult<()> {
let sql = format!(
"DELETE FROM \"{meta_table}\" WHERE key = ?;",
meta_table = TABLE_CRDT_CONFIGS
);
conn.execute(&sql, [SYNC_ACTIVE_KEY])?;
Ok(())
} */
/// Runs an action while the triggers are temporarily disabled.
/// This function makes sure the triggers are re-enabled even if the action
/// panics.
/* pub fn with_triggers_paused<F, R>(conn: &mut Connection, action: F) -> RusqliteResult<R>
where
F: FnOnce(&mut Connection) -> RusqliteResult<R>,
{
// AssertUnwindSafe is needed to use the mutex across a panic boundary.
// We catch a possible panic inside `action`.
let result = panic::catch_unwind(AssertUnwindSafe(|| action(conn)));
// This step MUST always run, regardless of whether `action` succeeded.
match result {
Ok(res) => res, // All good, return the result of `action`.
Err(e) => panic::resume_unwind(e), // A panic occurred; re-raise it after cleaning up.
}
} */
/// Creates the required meta table if it does not exist.
/* pub fn setup_meta_table(conn: &mut Connection) -> RusqliteResult<()> {
let sql = format!(
"CREATE TABLE IF NOT EXISTS \"{meta_table}\" (key TEXT PRIMARY KEY, value TEXT) WITHOUT ROWID;",
meta_table = TABLE_CRDT_CONFIGS
);
conn.execute(&sql, [])?;
Ok(())
} */
#[derive(Debug)]
struct ColumnInfo {
name: String,
@ -145,19 +98,11 @@ fn is_safe_identifier(name: &str) -> bool {
/// Sets up the CRDT triggers for a single table.
pub fn setup_triggers_for_table(
conn: &mut Connection,
tx: &Transaction,
table_name: &str,
recreate: &bool,
recreate: bool,
) -> Result<TriggerSetupResult, CrdtSetupError> {
if !is_safe_identifier(table_name) {
return Err(rusqlite::Error::InvalidParameterName(format!(
"Invalid or unsafe table name provided: {}",
table_name
))
.into());
}
let columns = get_table_schema(conn, table_name)?;
let columns = get_table_schema(tx, table_name)?;
if columns.is_empty() {
return Ok(TriggerSetupResult::TableNotFound);
@ -198,23 +143,26 @@ pub fn setup_triggers_for_table(
let insert_trigger_sql = generate_insert_trigger_sql(table_name, &pks, &cols_to_track);
let update_trigger_sql = generate_update_trigger_sql(table_name, &pks, &cols_to_track);
let sql_batch = format!("{}\n{}", insert_trigger_sql, update_trigger_sql);
// Run the creation inside a transaction
let tx = conn.transaction()?;
if *recreate {
if recreate {
drop_triggers_for_table(&tx, table_name)?;
}
tx.execute_batch(&sql_batch)?;
tx.commit()?;
tx.execute_batch(&insert_trigger_sql)?;
tx.execute_batch(&update_trigger_sql)?;
Ok(TriggerSetupResult::Success)
}
/// Fetches the schema for a given table.
/// IMPORTANT: This is a private helper. It assumes that `table_name`
/// has already been validated by the public caller (setup_triggers_for_table).
fn get_table_schema(conn: &Connection, table_name: &str) -> RusqliteResult<Vec<ColumnInfo>> {
if !is_safe_identifier(table_name) {
return Err(rusqlite::Error::InvalidParameterName(format!(
"Invalid or unsafe table name provided: {}",
table_name
))
.into());
}
let sql = format!("PRAGMA table_info(\"{}\");", table_name);
let mut stmt = conn.prepare(&sql)?;
let rows = stmt.query_map([], ColumnInfo::from_row)?;
@ -399,74 +347,3 @@ fn generate_update_trigger_sql(table_name: &str, pks: &[String], cols: &[String]
END;"
)
}
/* fn generate_update_trigger_sql(table_name: &str, pks: &[String], cols: &[String]) -> String {
let pk_json_payload = pks
.iter()
.map(|pk| format!("'{}', NEW.\"{}\"", pk, pk))
.collect::<Vec<_>>()
.join(", ");
let column_updates = cols.iter().fold(String::new(), |mut acc, col| {
writeln!(&mut acc, " IF NEW.\"{column}\" IS NOT OLD.\"{column}\" THEN INSERT INTO {log_table} (hlc_timestamp, op_type, table_name, row_pk, column_name, value, old_value) VALUES (NEW.\"{hlc_col}\", 'UPDATE', '{table}', json_object({pk_payload}), '{column}', json_object('value', NEW.\"{column}\"), json_object('value', OLD.\"{column}\")); END IF;",
log_table = TABLE_CRDT_LOGS,
hlc_col = HLC_TIMESTAMP_COLUMN,
table = table_name,
pk_payload = pk_json_payload,
column = col
).unwrap();
acc
});
let soft_delete_logic = format!(
" IF NEW.\"{tombstone_col}\" = 1 AND OLD.\"{tombstone_col}\" = 0 THEN INSERT INTO {log_table} (hlc_timestamp, op_type, table_name, row_pk) VALUES (NEW.\"{hlc_col}\", 'DELETE', '{table}', json_object({pk_payload})); END IF;",
log_table = TABLE_CRDT_LOGS,
hlc_col = HLC_TIMESTAMP_COLUMN,
tombstone_col = TOMBSTONE_COLUMN,
table = table_name,
pk_payload = pk_json_payload
);
let trigger_name = UPDATE_TRIGGER_TPL.replace("{TABLE_NAME}", table_name);
format!(
"CREATE TRIGGER IF NOT EXISTS \"{trigger_name}\"
AFTER UPDATE ON \"{table_name}\"
WHEN (SELECT value FROM \"{config_table}\" WHERE key = '{sync_key}') IS NOT '1'
FOR EACH ROW
BEGIN
{column_updates}
{soft_delete_logic}
END;",
config_table = TABLE_CRDT_CONFIGS,
sync_key = SYNC_ACTIVE_KEY
)
}
*/
/*
/// Iterates over all `haex_` tables and sets up the CRDT triggers.
pub fn generate_haex_triggers(conn: &mut Connection) -> Result<(), rusqlite::Error> {
println!("🔄 Setup CRDT triggers...");
let table_names: Vec<String> = {
let mut stmt = conn.prepare("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'haex_%' AND name NOT LIKE 'haex_crdt_%';")?;
let rows = stmt.query_map([], |row| row.get::<_, String>(0))?;
rows.collect::<RusqliteResult<Vec<String>>>()?
};
for table_name in table_names {
if table_name == TABLE_CRDT_CONFIGS {
continue;
}
println!("➡️ Processing table: {}", table_name);
match setup_triggers_for_table(conn, &table_name) {
Ok(TriggerSetupResult::Success) => {
println!(" ✅ Triggers created for {}", table_name)
}
Ok(TriggerSetupResult::TableNotFound) => {
println!(" Table {} not found, skipping.", table_name)
}
Err(e) => println!(" ❌ Could not set up triggers for {}: {}", table_name, e),
}
}
println!("✨ Done setting up CRDT triggers.");
Ok(())
} */
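
For orientation, a sketch of the new call shape of setup_triggers_for_table (editor's illustration, not part of this commit); the table name and the error conversion to Box<dyn Error> are assumptions:

use rusqlite::Connection;
use crate::crdt::trigger::{setup_triggers_for_table, TriggerSetupResult};

fn install_triggers(conn: &mut Connection) -> Result<(), Box<dyn std::error::Error>> {
    let tx = conn.transaction()?;
    // `true` recreates (drops and re-adds) existing triggers.
    match setup_triggers_for_table(&tx, "haex_notes", true)? {
        TriggerSetupResult::Success => println!("triggers (re)created"),
        TriggerSetupResult::TableNotFound => println!("table not found, skipped"),
    }
    tx.commit()?;
    Ok(())
}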

View File

@ -1,276 +0,0 @@
// Pull in the constants from our generated module.
// `crate` refers to the root of our crate (src-tauri/src).
use crate::tableNames::*;
use rusqlite::{Connection, Result as RusqliteResult, Row};
use serde::Serialize;
use std::error::Error;
use std::fmt::{self, Display, Formatter, Write};
use std::panic::{self, AssertUnwindSafe};
use ts_rs::TS;
// Hard-coded constants that do not come from the JSON file, since they are part of the internal logic.
const SYNC_ACTIVE_KEY: &str = "sync_active";
const TOMBSTONE_COLUMN: &str = "haex_tombstone";
const HLC_TIMESTAMP_COLUMN: &str = "haex_hlc_timestamp";
const INSERT_TRIGGER_TPL: &str = "z_crdt_{TABLE_NAME}_insert";
const UPDATE_TRIGGER_TPL: &str = "z_crdt_{TABLE_NAME}_update";
// --- Custom error type for clear error handling ---
#[derive(Debug)]
pub enum CrdtSetupError {
DatabaseError(rusqlite::Error),
TombstoneColumnMissing {
table_name: String,
column_name: String,
},
PrimaryKeyMissing {
table_name: String,
},
}
impl Display for CrdtSetupError {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
CrdtSetupError::DatabaseError(e) => write!(f, "Database error: {}", e),
CrdtSetupError::TombstoneColumnMissing {
table_name,
column_name,
} => write!(
f,
"Table '{}' is missing the required tombstone column '{}'",
table_name, column_name
),
CrdtSetupError::PrimaryKeyMissing { table_name } => {
write!(f, "Table '{}' has no primary key", table_name)
}
}
}
}
impl Error for CrdtSetupError {}
impl From<rusqlite::Error> for CrdtSetupError {
fn from(err: rusqlite::Error) -> Self {
CrdtSetupError::DatabaseError(err)
}
}
// --- Public structs and enums ---
#[derive(Debug, Serialize, TS)]
#[ts(export)]
pub enum TriggerSetupResult {
Success,
TableNotFound,
}
#[derive(Debug)]
struct ColumnInfo {
name: String,
is_pk: bool,
}
impl ColumnInfo {
fn from_row(row: &Row) -> RusqliteResult<Self> {
Ok(ColumnInfo {
name: row.get("name")?,
is_pk: row.get::<_, i64>("pk")? > 0,
})
}
}
// --- Public functions for the application logic ---
/// Creates the required CRDT system tables (e.g. the config table) if they do not exist.
/// Should be called once at application startup.
pub fn setup_crdt_tables(conn: &mut Connection) -> RusqliteResult<()> {
let config_sql = format!(
"CREATE TABLE IF NOT EXISTS \"{config_table}\" (key TEXT PRIMARY KEY, value TEXT) WITHOUT ROWID;",
config_table = TABLE_CRDT_CONFIGS
);
conn.execute(&config_sql, [])?;
Ok(())
}
/// Runs an action while the triggers are temporarily disabled.
/// Makes sure the triggers are re-enabled even if the action panics.
pub fn with_triggers_paused<F, R>(conn: &mut Connection, action: F) -> RusqliteResult<R>
where
F: FnOnce(&mut Connection) -> RusqliteResult<R>,
{
set_sync_active(conn)?;
// `catch_unwind` catches a possible panic inside `action`.
let result = panic::catch_unwind(AssertUnwindSafe(|| action(conn)));
// This cleanup step always runs.
clear_sync_active(conn)?;
match result {
Ok(res) => res, // All good, return the result of `action`.
Err(e) => panic::resume_unwind(e), // A panic occurred; re-raise it after cleaning up.
}
}
/// Scans all `haex_` tables in the database and creates the required CRDT triggers.
pub fn generate_haex_triggers(conn: &mut Connection) -> RusqliteResult<()> {
println!("🔄 Setup CRDT triggers...");
let table_names: Vec<String> = {
let mut stmt = conn.prepare("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'haex_%' AND name NOT LIKE 'haex_crdt_%';")?;
let rows = stmt.query_map([], |row| row.get::<_, String>(0))?;
rows.collect::<RusqliteResult<Vec<String>>>()?
};
for table_name in table_names {
// Skip the config table itself; it needs no triggers.
if table_name == TABLE_CRDT_CONFIGS {
continue;
}
println!("➡️ Processing table: {}", table_name);
match setup_triggers_for_table(conn, &table_name) {
Ok(TriggerSetupResult::Success) => {
println!(" ✅ Triggers created for {}", table_name)
}
Ok(TriggerSetupResult::TableNotFound) => {
println!(" Table {} not found, skipping.", table_name)
}
Err(e) => println!(" ❌ Could not set up triggers for {}: {}", table_name, e),
}
}
println!("✨ Done setting up CRDT triggers.");
Ok(())
}
// --- Private helper functions ---
fn set_sync_active(conn: &mut Connection) -> RusqliteResult<()> {
let sql = format!(
"INSERT OR REPLACE INTO \"{config_table}\" (key, value) VALUES (?, '1');",
config_table = TABLE_CRDT_CONFIGS
);
conn.execute(&sql, [SYNC_ACTIVE_KEY])?;
Ok(())
}
fn clear_sync_active(conn: &mut Connection) -> RusqliteResult<()> {
let sql = format!(
"DELETE FROM \"{config_table}\" WHERE key = ?;",
config_table = TABLE_CRDT_CONFIGS
);
conn.execute(&sql, [SYNC_ACTIVE_KEY])?;
Ok(())
}
fn is_safe_identifier(name: &str) -> bool {
!name.is_empty() && name.chars().all(|c| c.is_alphanumeric() || c == '_')
}
fn setup_triggers_for_table(
conn: &mut Connection,
table_name: &str,
) -> Result<TriggerSetupResult, CrdtSetupError> {
if !is_safe_identifier(table_name) {
return Err(rusqlite::Error::InvalidParameterName(format!(
"Invalid table name: {}",
table_name
))
.into());
}
let columns = get_table_schema(conn, table_name)?;
if columns.is_empty() {
return Ok(TriggerSetupResult::TableNotFound);
}
if !columns.iter().any(|c| c.name == TOMBSTONE_COLUMN) {
return Err(CrdtSetupError::TombstoneColumnMissing {
table_name: table_name.to_string(),
column_name: TOMBSTONE_COLUMN.to_string(),
});
}
let pks: Vec<String> = columns
.iter()
.filter(|c| c.is_pk)
.map(|c| c.name.clone())
.collect();
if pks.is_empty() {
return Err(CrdtSetupError::PrimaryKeyMissing {
table_name: table_name.to_string(),
});
}
let cols_to_track: Vec<String> = columns
.iter()
.filter(|c| !c.is_pk && c.name != TOMBSTONE_COLUMN && c.name != HLC_TIMESTAMP_COLUMN)
.map(|c| c.name.clone())
.collect();
let insert_trigger_sql = generate_insert_trigger_sql(table_name, &pks, &cols_to_track);
let update_trigger_sql = generate_update_trigger_sql(table_name, &pks, &cols_to_track);
let drop_insert_trigger_sql =
drop_trigger_sql(INSERT_TRIGGER_TPL.replace("{TABLE_NAME}", table_name));
let drop_update_trigger_sql =
drop_trigger_sql(UPDATE_TRIGGER_TPL.replace("{TABLE_NAME}", table_name));
let tx = conn.transaction()?;
tx.execute_batch(&format!(
"{}\n{}\n{}\n{}",
drop_insert_trigger_sql, drop_update_trigger_sql, insert_trigger_sql, update_trigger_sql
))?;
tx.commit()?;
Ok(TriggerSetupResult::Success)
}
fn get_table_schema(conn: &Connection, table_name: &str) -> RusqliteResult<Vec<ColumnInfo>> {
let sql = format!("PRAGMA table_info(\"{}\");", table_name);
let mut stmt = conn.prepare(&sql)?;
let rows = stmt.query_map([], ColumnInfo::from_row)?;
rows.collect()
}
fn drop_trigger_sql(trigger_name: String) -> String {
format!("DROP TRIGGER IF EXISTS \"{}\";", trigger_name)
}
fn generate_insert_trigger_sql(table_name: &str, pks: &[String], cols: &[String]) -> String {
let pk_json_payload = pks
.iter()
.map(|pk| format!("'{}', NEW.\"{}\"", pk, pk))
.collect::<Vec<_>>()
.join(", ");
let column_inserts = cols.iter().fold(String::new(), |mut acc, col| {
writeln!(&mut acc, " INSERT INTO \"{log_table}\" (hlc_timestamp, op_type, table_name, row_pk, column_name, value) VALUES (NEW.\"{hlc_col}\", 'INSERT', '{table}', json_object({pk_payload}), '{column}', json_object('value', NEW.\"{column}\"));", log_table = TABLE_CRDT_LOGS, hlc_col = HLC_TIMESTAMP_COLUMN, table = table_name, pk_payload = pk_json_payload, column = col).unwrap();
acc
});
let trigger_name = INSERT_TRIGGER_TPL.replace("{TABLE_NAME}", table_name);
format!(
"CREATE TRIGGER IF NOT EXISTS \"{trigger_name}\"
AFTER INSERT ON \"{table_name}\"
WHEN (SELECT value FROM \"{config_table}\" WHERE key = '{sync_key}') IS NOT '1'
FOR EACH ROW
BEGIN
{column_inserts}
END;",
config_table = TABLE_CRDT_CONFIGS,
sync_key = SYNC_ACTIVE_KEY
)
}
fn generate_update_trigger_sql(table_name: &str, pks: &[String], cols: &[String]) -> String {
let pk_json_payload = pks
.iter()
.map(|pk| format!("'{}', NEW.\"{}\"", pk, pk))
.collect::<Vec<_>>()
.join(", ");
let column_updates = cols.iter().fold(String::new(), |mut acc, col| {
writeln!(&mut acc, " IF NEW.\"{column}\" IS NOT OLD.\"{column}\" THEN INSERT INTO \"{log_table}\" (hlc_timestamp, op_type, table_name, row_pk, column_name, value, old_value) VALUES (NEW.\"{hlc_col}\", 'UPDATE', '{table}', json_object({pk_payload}), '{column}', json_object('value', NEW.\"{column}\"), json_object('value', OLD.\"{column}\")); END IF;", log_table = TABLE_CRDT_LOGS, hlc_col = HLC_TIMESTAMP_COLUMN, table = table_name, pk_payload = pk_json_payload, column = col).unwrap();
acc
});
let soft_delete_logic = format!(
" IF NEW.\"{tombstone_col}\" = 1 AND OLD.\"{tombstone_col}\" = 0 THEN INSERT INTO \"{log_table}\" (hlc_timestamp, op_type, table_name, row_pk) VALUES (NEW.\"{hlc_col}\", 'DELETE', '{table}', json_object({pk_payload})); END IF;", log_table = TABLE_CRDT_LOGS, hlc_col = HLC_TIMESTAMP_COLUMN, tombstone_col = TOMBSTONE_COLUMN, table = table_name, pk_payload = pk_json_payload);
let trigger_name = UPDATE_TRIGGER_TPL.replace("{TABLE_NAME}", table_name);
format!(
"CREATE TRIGGER IF NOT EXISTS \"{trigger_name}\"
AFTER UPDATE ON \"{table_name}\"
WHEN (SELECT value FROM \"{config_table}\" WHERE key = '{sync_key}') IS NOT '1'
FOR EACH ROW
BEGIN
{column_updates}
{soft_delete_logic}
END;",
config_table = TABLE_CRDT_CONFIGS,
sync_key = SYNC_ACTIVE_KEY
)
}
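// Illustrative sketch only: for a hypothetical table "notes" with primary key "id" and one
// tracked column "title", generate_insert_trigger_sql produces SQL of roughly this shape
// (constant names shown symbolically, since their values are defined elsewhere):
//
//   CREATE TRIGGER IF NOT EXISTS "<insert trigger name for notes>"
//     AFTER INSERT ON "notes"
//     WHEN (SELECT value FROM "<TABLE_CRDT_CONFIGS>" WHERE key = '<SYNC_ACTIVE_KEY>') IS NOT '1'
//     FOR EACH ROW
//     BEGIN
//       INSERT INTO "<TABLE_CRDT_LOGS>" (hlc_timestamp, op_type, table_name, row_pk, column_name, value)
//       VALUES (NEW."<HLC_TIMESTAMP_COLUMN>", 'INSERT', 'notes', json_object('id', NEW."id"), 'title', json_object('value', NEW."title"));
//     END;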

View File

@ -1,246 +1,536 @@
// database/core.rs
// src-tauri/src/database/core.rs
use std::collections::HashMap;
use crate::database::error::DatabaseError;
use crate::database::DbConnection;
use base64::{engine::general_purpose::STANDARD, Engine as _};
use rusqlite::types::Value as SqlValue;
use rusqlite::{
types::{Value as RusqliteValue, ValueRef},
Connection, OpenFlags, ToSql,
};
use serde_json::Value as JsonValue;
use tauri::State;
// --- Helper: converts a JSON value into something rusqlite understands ---
// This is a bit tricky because of ownership and lifetimes.
// A simpler variant is often to construct rusqlite::types::Value directly.
// The version below creates owned values (may need adjustment depending on the rusqlite version/usage).
fn json_to_rusqlite_value(json_val: &JsonValue) -> Result<RusqliteValue, String> {
match json_val {
JsonValue::Null => Ok(RusqliteValue::Null),
        JsonValue::Bool(b) => Ok(RusqliteValue::Integer(*b as i64)), // SQLite has no BOOLEAN type
JsonValue::Number(n) => {
if let Some(i) = n.as_i64() {
Ok(RusqliteValue::Integer(i))
} else if let Some(f) = n.as_f64() {
Ok(RusqliteValue::Real(f))
} else {
Err("Ungültiger Zahlenwert".to_string())
}
}
JsonValue::String(s) => Ok(RusqliteValue::Text(s.clone())),
JsonValue::Array(_) | JsonValue::Object(_) => {
            // SQLite cannot store arrays/objects directly (except as TEXT/BLOB).
            // Convert them to JSON strings if that is the desired behaviour.
Ok(RusqliteValue::Text(
serde_json::to_string(json_val).map_err(|e| e.to_string())?,
))
            // Or return an error if arrays/objects are not allowed:
            // Err("Arrays or objects are not supported directly as parameters".to_string())
}
}
}
pub async fn execute(
sql: String,
params: Vec<JsonValue>,
state: &State<'_, DbConnection>,
) -> Result<usize, String> {
    // Returns the number of affected rows
let params_converted: Vec<RusqliteValue> = params
.iter()
.map(json_to_rusqlite_value)
.collect::<Result<Vec<_>, _>>()?;
let params_sql: Vec<&dyn ToSql> = params_converted.iter().map(|v| v as &dyn ToSql).collect();
let db_lock = state
.0
.lock()
.map_err(|e| format!("Mutex Lock Fehler: {}", e))?;
let conn = db_lock.as_ref().ok_or("Keine Datenbankverbindung")?;
let affected_rows = conn
.execute(&sql, &params_sql[..])
.map_err(|e| format!("SQL Execute Fehler: {}", e))?;
Ok(affected_rows)
}
pub async fn select(
sql: String,
params: Vec<JsonValue>,
state: &State<'_, DbConnection>,
) -> Result<Vec<Vec<JsonValue>>, String> {
    // Result as Vec<RowObject>
    // Convert the JSON params into rusqlite values for the query.
    // We collect them as owned values, because `params_from_iter` needs references,
    // which is awkward with locally created conversions.
let params_converted: Vec<RusqliteValue> = params
.iter()
.map(json_to_rusqlite_value)
        .collect::<Result<Vec<_>, _>>()?; // collect the results, propagating errors
    // Convert to a slice of ToSql references (requires the values to stay alive)
    let params_sql: Vec<&dyn ToSql> = params_converted.iter().map(|v| v as &dyn ToSql).collect();
    // Access the connection (blocking, which is fine for SQLite in many cases)
let db_lock = state
.0
.lock()
.map_err(|e| format!("Mutex Lock Fehler: {}", e))?;
let conn = db_lock.as_ref().ok_or("Keine Datenbankverbindung")?;
let mut stmt = conn
.prepare(&sql)
.map_err(|e| format!("SQL Prepare Fehler: {}", e))?;
let column_names: Vec<String> = stmt
.column_names()
.into_iter()
.map(|s| s.to_string())
.collect();
let num_columns = column_names.len();
let mut rows = stmt
.query(&params_sql[..])
.map_err(|e| format!("SQL Query Fehler: {}", e))?;
let mut result_vec: Vec<Vec<JsonValue>> = Vec::new();
println!();
println!();
println!();
println!();
while let Some(row) = rows.next().map_err(|e| format!("Row Next Fehler: {}", e))? {
//let mut row_map = HashMap::new();
let mut row_data: Vec<JsonValue> = Vec::with_capacity(num_columns);
for i in 0..num_columns {
let col_name = &column_names[i];
println!(
"--- Processing Column --- Index: {}, Name: '{}'",
i, col_name
);
let value_ref = row
.get_ref(i)
.map_err(|e| format!("Get Ref Fehler Spalte {}: {}", i, e))?;
            // Convert the rusqlite ValueRef back into a serde_json Value
let json_val = match value_ref {
ValueRef::Null => JsonValue::Null,
ValueRef::Integer(i) => JsonValue::Number(i.into()),
ValueRef::Real(f) => JsonValue::Number(
serde_json::Number::from_f64(f).unwrap_or(serde_json::Number::from(0)),
                ), // fallback for NaN/Infinity
ValueRef::Text(t) => {
let s = String::from_utf8_lossy(t).to_string();
                    // Try to parse as JSON in case it was originally an array/object:
                    //serde_json::from_str(&s).unwrap_or(JsonValue::String(s))
JsonValue::String(s)
}
ValueRef::Blob(b) => {
                    // Return BLOBs e.g. as a Base64 string
JsonValue::String(STANDARD.encode(b))
}
};
println!(
"new row: name: {} with value: {}",
column_names[i].clone(),
json_val,
);
row_data.push(json_val);
//row_map.insert(column_names[i].clone(), json_val);
}
//result_vec.push(row_map);
result_vec.push(row_data);
}
Ok(result_vec)
}
use sqlparser::ast::{Query, Select, SetExpr, Statement, TableFactor, TableObject};
use sqlparser::dialect::SQLiteDialect;
use sqlparser::parser::Parser;
/// Opens and initializes an encrypted database
pub fn open_and_init_db(path: &str, key: &str, create: bool) -> Result<Connection, String> {
pub fn open_and_init_db(path: &str, key: &str, create: bool) -> Result<Connection, DatabaseError> {
let flags = if create {
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE
} else {
OpenFlags::SQLITE_OPEN_READ_WRITE
};
let conn = Connection::open_with_flags(path, flags).map_err(|e| {
format!(
"Datei gibt es nicht: {}. Habe nach {} gesucht",
e.to_string(),
path
)
})?;
conn.pragma_update(None, "key", key)
.map_err(|e| e.to_string())?;
let conn =
Connection::open_with_flags(path, flags).map_err(|e| DatabaseError::ConnectionFailed {
path: path.to_string(),
reason: e.to_string(),
})?;
/* conn.execute_batch("SELECT count(*) from haex_extensions")
.map_err(|e| e.to_string())?; */
conn.pragma_update(None, "key", key)
.map_err(|e| DatabaseError::PragmaError {
pragma: "key".to_string(),
reason: e.to_string(),
})?;
let journal_mode: String = conn
.query_row("PRAGMA journal_mode=WAL;", [], |row| row.get(0))
.map_err(|e| e.to_string())?;
.map_err(|e| DatabaseError::PragmaError {
pragma: "journal_mode=WAL".to_string(),
reason: e.to_string(),
})?;
if journal_mode.eq_ignore_ascii_case("wal") {
println!("WAL mode successfully enabled.");
} else {
eprintln!("Failed to enable WAL mode.");
eprintln!(
"Failed to enable WAL mode, journal_mode is '{}'.",
journal_mode
);
}
Ok(conn)
}
// Helper functions for SQL parsing
pub fn extract_tables_from_query(query: &sqlparser::ast::Query) -> Vec<String> {
/// SQL parsing utility - parses a single SQL statement
pub fn parse_single_statement(sql: &str) -> Result<Statement, DatabaseError> {
let dialect = SQLiteDialect {};
let statements = Parser::parse_sql(&dialect, sql).map_err(|e| DatabaseError::ParseError {
reason: e.to_string(),
sql: sql.to_string(),
})?;
statements
.into_iter()
.next()
.ok_or(DatabaseError::ParseError {
reason: "No SQL statement found".to_string(),
sql: sql.to_string(),
})
}
/// SQL parsing utility - parses multiple SQL statements
pub fn parse_sql_statements(sql: &str) -> Result<Vec<Statement>, DatabaseError> {
let dialect = SQLiteDialect {};
Parser::parse_sql(&dialect, sql).map_err(|e| DatabaseError::ParseError {
reason: e.to_string(),
sql: sql.to_string(),
})
}
pub struct ValueConverter;
impl ValueConverter {
pub fn json_to_rusqlite_value(json_val: &JsonValue) -> Result<SqlValue, DatabaseError> {
match json_val {
JsonValue::Null => Ok(SqlValue::Null),
JsonValue::Bool(b) => {
                // SQLite has no boolean type; use integer 0/1
Ok(SqlValue::Integer(if *b { 1 } else { 0 }))
}
JsonValue::Number(n) => {
if let Some(i) = n.as_i64() {
Ok(SqlValue::Integer(i))
} else if let Some(f) = n.as_f64() {
Ok(SqlValue::Real(f))
} else {
                    // fallback: store as text
Ok(SqlValue::Text(n.to_string()))
}
}
JsonValue::String(s) => Ok(SqlValue::Text(s.clone())),
JsonValue::Array(_) | JsonValue::Object(_) => {
                // store arrays/objects as JSON text
serde_json::to_string(json_val)
.map(SqlValue::Text)
.map_err(|e| DatabaseError::SerializationError {
reason: format!("Failed to serialize JSON param: {}", e),
})
}
}
}
pub fn convert_params(params: &[JsonValue]) -> Result<Vec<SqlValue>, DatabaseError> {
params.iter().map(Self::json_to_rusqlite_value).collect()
}
}
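// A small sketch of the resulting mapping (using serde_json's `json!` macro), not a test:
//
//   let params = vec![json!(null), json!(true), json!(42.5), json!("abc"), json!({"a": 1})];
//   let values = ValueConverter::convert_params(&params)?;
//   // -> [Null, Integer(1), Real(42.5), Text("abc"), Text("{\"a\":1}")]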
pub fn execute(
sql: String,
params: Vec<JsonValue>,
connection: &DbConnection,
) -> Result<usize, DatabaseError> {
    // Convert parameters
let params_converted: Vec<RusqliteValue> = params
.iter()
.map(ValueConverter::json_to_rusqlite_value)
.collect::<Result<Vec<_>, _>>()?;
let params_sql: Vec<&dyn ToSql> = params_converted.iter().map(|v| v as &dyn ToSql).collect();
with_connection(connection, |conn| {
let affected_rows = conn.execute(&sql, &params_sql[..]).map_err(|e| {
// "Lazy Parsing": Extrahiere den Tabellennamen nur, wenn ein Fehler auftritt,
// um den Overhead bei erfolgreichen Operationen zu vermeiden.
let table_name = extract_primary_table_name_from_sql(&sql).unwrap_or(None);
DatabaseError::ExecutionError {
sql: sql.clone(),
reason: e.to_string(),
table: table_name,
}
})?;
Ok(affected_rows)
})
}
pub fn select(
sql: String,
params: Vec<JsonValue>,
connection: &DbConnection,
) -> Result<Vec<HashMap<String, JsonValue>>, DatabaseError> {
    // Validate the SQL statement
let statement = parse_single_statement(&sql)?;
    // Make sure it is a query
if !matches!(statement, Statement::Query(_)) {
return Err(DatabaseError::UnsupportedStatement {
statement_type: "Non-Query".to_string(),
description: "Only SELECT statements are allowed in select function".to_string(),
});
}
    // Convert parameters
let params_converted: Vec<RusqliteValue> = params
.iter()
.map(ValueConverter::json_to_rusqlite_value)
.collect::<Result<Vec<_>, _>>()?;
let params_sql: Vec<&dyn ToSql> = params_converted.iter().map(|v| v as &dyn ToSql).collect();
with_connection(connection, |conn| {
let mut stmt = conn
.prepare(&sql)
.map_err(|e| DatabaseError::PrepareError {
reason: e.to_string(),
})?;
let column_names: Vec<String> = stmt
.column_names()
.into_iter()
.map(|s| s.to_string())
.collect();
let num_columns = column_names.len();
let mut rows = stmt
.query(&params_sql[..])
.map_err(|e| DatabaseError::QueryError {
reason: e.to_string(),
})?;
let mut result_vec: Vec<HashMap<String, JsonValue>> = Vec::new();
while let Some(row) = rows.next().map_err(|e| DatabaseError::RowProcessingError {
reason: format!("Row iteration error: {}", e),
})? {
let mut row_map: HashMap<String, JsonValue> = HashMap::with_capacity(num_columns);
for i in 0..num_columns {
let col_name = &column_names[i];
/* println!(
"--- Processing Column --- Index: {}, Name: '{}'",
i, col_name
); */
let value_ref = row
.get_ref(i)
.map_err(|e| DatabaseError::RowProcessingError {
reason: format!("Failed to get column {} ('{}'): {}", i, col_name, e),
})?;
let json_val = convert_value_ref_to_json(value_ref)?;
//println!("Column: {} = {}", column_names[i], json_val);
row_map.insert(col_name.clone(), json_val);
}
result_vec.push(row_map);
}
Ok(result_vec)
})
}
/// Converts a rusqlite ValueRef to JSON
fn convert_value_ref_to_json(value_ref: ValueRef) -> Result<JsonValue, DatabaseError> {
let json_val = match value_ref {
ValueRef::Null => JsonValue::Null,
ValueRef::Integer(i) => JsonValue::Number(i.into()),
ValueRef::Real(f) => JsonValue::Number(
serde_json::Number::from_f64(f).unwrap_or_else(|| serde_json::Number::from(0)),
),
ValueRef::Text(t) => {
let s = String::from_utf8_lossy(t).to_string();
JsonValue::String(s)
}
ValueRef::Blob(b) => {
            // return BLOBs as a Base64 string
JsonValue::String(STANDARD.encode(b))
}
};
Ok(json_val)
}
/// Extracts all table names from a SQL statement via AST parsing
pub fn extract_table_names_from_sql(sql: &str) -> Result<Vec<String>, DatabaseError> {
let statement = parse_single_statement(sql)?;
Ok(extract_table_names_from_statement(&statement))
}
/// Extracts the first/primary table name from a SQL statement
pub fn extract_primary_table_name_from_sql(sql: &str) -> Result<Option<String>, DatabaseError> {
let table_names = extract_table_names_from_sql(sql)?;
Ok(table_names.into_iter().next())
}
/// Extracts all table names from an AST statement
pub fn extract_table_names_from_statement(statement: &Statement) -> Vec<String> {
let mut tables = Vec::new();
extract_tables_from_set_expr(&query.body, &mut tables);
match statement {
Statement::Query(query) => {
extract_tables_from_query_recursive(query, &mut tables);
}
Statement::Insert(insert) => {
if let TableObject::TableName(name) = &insert.table {
tables.push(name.to_string());
}
}
Statement::Update { table, .. } => {
extract_tables_from_table_factor(&table.relation, &mut tables);
}
Statement::Delete(delete) => {
use sqlparser::ast::FromTable;
match &delete.from {
FromTable::WithFromKeyword(table_refs) | FromTable::WithoutKeyword(table_refs) => {
for table_ref in table_refs {
extract_tables_from_table_factor(&table_ref.relation, &mut tables);
}
}
}
            // fallback for DELETE syntax without FROM
for table_name in &delete.tables {
tables.push(table_name.to_string());
}
}
Statement::CreateTable(create) => {
tables.push(create.name.to_string());
}
Statement::AlterTable { name, .. } => {
tables.push(name.to_string());
}
Statement::Drop { names, .. } => {
for name in names {
tables.push(name.to_string());
}
}
Statement::CreateIndex(create_index) => {
tables.push(create_index.table_name.to_string());
}
Statement::Truncate { table_names, .. } => {
for table_name in table_names {
tables.push(table_name.to_string());
}
}
        // Further statement types can be added here
        _ => {
            // For unknown statement types we return an empty list
        }
}
tables
}
fn extract_tables_from_set_expr(set_expr: &sqlparser::ast::SetExpr, tables: &mut Vec<String>) {
match set_expr {
sqlparser::ast::SetExpr::Select(select) => {
for from in &select.from {
extract_tables_from_table_with_joins(from, tables);
}
/// Recursively extracts table names from query structures
fn extract_tables_from_query_recursive(query: &Query, tables: &mut Vec<String>) {
extract_tables_from_set_expr_recursive(&query.body, tables);
}
/// Extracts table names from SELECT statements
fn extract_tables_from_select(select: &Select, tables: &mut Vec<String>) {
// FROM clause
for table_ref in &select.from {
extract_tables_from_table_factor(&table_ref.relation, tables);
// JOINs
for join in &table_ref.joins {
extract_tables_from_table_factor(&join.relation, tables);
}
sqlparser::ast::SetExpr::Query(query) => {
extract_tables_from_set_expr(&query.body, tables);
}
sqlparser::ast::SetExpr::SetOperation { left, right, .. } => {
extract_tables_from_set_expr(left, tables);
extract_tables_from_set_expr(right, tables);
}
        _ => (), // ignore other cases such as Values or Insert
}
}
fn extract_tables_from_table_with_joins(
table_with_joins: &sqlparser::ast::TableWithJoins,
tables: &mut Vec<String>,
) {
extract_tables_from_table_factor(&table_with_joins.relation, tables);
for join in &table_with_joins.joins {
extract_tables_from_table_factor(&join.relation, tables);
}
}
fn extract_tables_from_table_factor(
table_factor: &sqlparser::ast::TableFactor,
tables: &mut Vec<String>,
) {
/// Extracts table names from TableFactor structures
fn extract_tables_from_table_factor(table_factor: &TableFactor, tables: &mut Vec<String>) {
match table_factor {
sqlparser::ast::TableFactor::Table { name, .. } => {
TableFactor::Table { name, .. } => {
tables.push(name.to_string());
}
sqlparser::ast::TableFactor::Derived { subquery, .. } => {
extract_tables_from_set_expr(&subquery.body, tables);
TableFactor::Derived { subquery, .. } => {
extract_tables_from_query_recursive(subquery, tables);
}
sqlparser::ast::TableFactor::NestedJoin {
TableFactor::TableFunction { .. } => {
            // table functions normally have no direct table names
}
TableFactor::NestedJoin {
table_with_joins, ..
} => {
extract_tables_from_table_with_joins(table_with_joins, tables);
extract_tables_from_table_factor(&table_with_joins.relation, tables);
for join in &table_with_joins.joins {
extract_tables_from_table_factor(&join.relation, tables);
}
}
        _ => {
            // TableFunction, UNNEST, JsonTable, etc. normally have no direct table names
            // or are not relevant for SQLite
        }
        _ => (), // ignore other cases such as TableFunction
}
}
/// Recursively extracts table names from SetExpr structures.
/// This function contains the actual recursive logic.
fn extract_tables_from_set_expr_recursive(set_expr: &SetExpr, tables: &mut Vec<String>) {
match set_expr {
SetExpr::Select(select) => {
extract_tables_from_select(select, tables);
}
SetExpr::Query(sub_query) => {
extract_tables_from_set_expr_recursive(&sub_query.body, tables);
}
SetExpr::SetOperation { left, right, .. } => {
extract_tables_from_set_expr_recursive(left, tables);
extract_tables_from_set_expr_recursive(right, tables);
}
SetExpr::Values(_)
| SetExpr::Table(_)
| SetExpr::Insert(_)
| SetExpr::Update(_)
| SetExpr::Delete(_) => {}
}
}
pub fn with_connection<T, F>(connection: &DbConnection, f: F) -> Result<T, DatabaseError>
where
F: FnOnce(&mut Connection) -> Result<T, DatabaseError>,
{
let mut db_lock = connection
.0
.lock()
.map_err(|e| DatabaseError::MutexPoisoned {
reason: e.to_string(),
})?;
let conn = db_lock.as_mut().ok_or(DatabaseError::ConnectionError {
reason: "Connection to vault failed".to_string(),
})?;
f(conn)
}
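// Minimal usage sketch, assuming a `DbConnection` handle is in scope: the closure receives
// the live connection, and its Result is passed straight through.
//
//   let row_count: i64 = with_connection(&db_connection, |conn| {
//       conn.query_row("SELECT count(*) FROM sqlite_master", [], |row| row.get(0))
//           .map_err(DatabaseError::from)
//   })?;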
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_extract_simple_select() {
let sql = "SELECT * FROM users";
let tables = extract_table_names_from_sql(sql).unwrap();
assert_eq!(tables, vec!["users"]);
}
#[test]
fn test_extract_select_with_join() {
let sql = "SELECT u.name, p.title FROM users u JOIN posts p ON u.id = p.user_id";
let tables = extract_table_names_from_sql(sql).unwrap();
assert_eq!(tables, vec!["users", "posts"]);
}
#[test]
fn test_extract_insert() {
let sql = "INSERT INTO users (name, email) VALUES (?, ?)";
let tables = extract_table_names_from_sql(sql).unwrap();
assert_eq!(tables, vec!["users"]);
}
#[test]
fn test_extract_update() {
let sql = "UPDATE users SET name = ? WHERE id = ?";
let tables = extract_table_names_from_sql(sql).unwrap();
assert_eq!(tables, vec!["users"]);
}
#[test]
fn test_extract_delete() {
let sql = "DELETE FROM users WHERE id = ?";
let tables = extract_table_names_from_sql(sql).unwrap();
assert_eq!(tables, vec!["users"]);
}
#[test]
fn test_extract_create_table() {
let sql = "CREATE TABLE new_table (id INTEGER, name TEXT)";
let tables = extract_table_names_from_sql(sql).unwrap();
assert_eq!(tables, vec!["new_table"]);
}
#[test]
fn test_extract_subquery() {
let sql = "SELECT * FROM (SELECT id FROM users) AS sub";
let tables = extract_table_names_from_sql(sql).unwrap();
assert_eq!(tables, vec!["users"]);
}
#[test]
fn test_extract_primary_table() {
let sql = "SELECT u.name FROM users u JOIN posts p ON u.id = p.user_id";
let primary_table = extract_primary_table_name_from_sql(sql).unwrap();
assert_eq!(primary_table, Some("users".to_string()));
}
#[test]
fn test_extract_complex_query() {
let sql = r#"
SELECT u.name, COUNT(p.id) as post_count
FROM users u
LEFT JOIN posts p ON u.id = p.user_id
WHERE u.created_at > (SELECT MIN(created_at) FROM sessions)
GROUP BY u.id
"#;
let tables = extract_table_names_from_sql(sql).unwrap();
assert_eq!(tables, vec!["users", "posts", "sessions"]);
}
#[test]
fn test_invalid_sql() {
let sql = "INVALID SQL";
let result = extract_table_names_from_sql(sql);
assert!(result.is_err());
}
#[test]
fn test_parse_single_statement() {
let sql = "SELECT * FROM users WHERE id = ?";
let result = parse_single_statement(sql);
assert!(result.is_ok());
assert!(matches!(result.unwrap(), Statement::Query(_)));
}
#[test]
fn test_parse_invalid_sql() {
let sql = "INVALID SQL STATEMENT";
let result = parse_single_statement(sql);
assert!(matches!(result, Err(DatabaseError::ParseError { .. })));
}
#[test]
fn test_convert_value_ref_to_json() {
use rusqlite::types::ValueRef;
assert_eq!(
convert_value_ref_to_json(ValueRef::Null).unwrap(),
JsonValue::Null
);
assert_eq!(
convert_value_ref_to_json(ValueRef::Integer(42)).unwrap(),
JsonValue::Number(42.into())
);
assert_eq!(
convert_value_ref_to_json(ValueRef::Text(b"hello")).unwrap(),
JsonValue::String("hello".to_string())
);
}
    // Tests for the new AST-based functions
#[test]
fn test_extract_table_names_comprehensive() {
        // Test various SQL statement types
assert_eq!(
extract_primary_table_name_from_sql("SELECT * FROM users WHERE id = 1").unwrap(),
Some("users".to_string())
);
assert_eq!(
extract_primary_table_name_from_sql("INSERT INTO products (name) VALUES ('test')")
.unwrap(),
Some("products".to_string())
);
assert_eq!(
extract_primary_table_name_from_sql("UPDATE orders SET status = 'completed'").unwrap(),
Some("orders".to_string())
);
assert_eq!(
extract_primary_table_name_from_sql("DELETE FROM customers").unwrap(),
Some("customers".to_string())
);
}
}

View File

@ -0,0 +1,159 @@
// src-tauri/src/database/error.rs
use serde::{Deserialize, Serialize};
use thiserror::Error;
use ts_rs::TS;
use crate::crdt::trigger::CrdtSetupError;
#[derive(Error, Debug, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(tag = "type", content = "details")]
pub enum DatabaseError {
    /// The SQL code could not be parsed.
#[error("Failed to parse SQL: {reason} - SQL: {sql}")]
ParseError { reason: String, sql: String },
    /// Parameter error (wrong count, invalid type, etc.)
#[error("Parameter error: {reason} (expected: {expected}, provided: {provided})")]
ParamError {
reason: String,
expected: usize,
provided: usize,
},
#[error("Failed to prepare statement: {reason}")]
PrepareError { reason: String },
#[error("Database error: {reason}")]
DatabaseError { reason: String },
    /// An error occurred while executing against the database.
#[error("Execution error on table {}: {} - SQL: {}", table.as_deref().unwrap_or("unknown"), reason, sql)]
ExecutionError {
sql: String,
reason: String,
table: Option<String>,
},
    /// An error occurred while managing the transaction.
#[error("Transaction error: {reason}")]
TransactionError { reason: String },
    /// A SQL statement is not supported by the proxy.
#[error("Unsupported statement type '{statement_type}': {description}")]
UnsupportedStatement {
statement_type: String,
description: String,
},
    /// Error in the HLC service
#[error("HLC error: {reason}")]
HlcError { reason: String },
    /// Error while locking the database connection
#[error("Lock error: {reason}")]
LockError { reason: String },
    /// Error with the database connection
#[error("Connection error: {reason}")]
ConnectionError { reason: String },
    /// Error during JSON serialization
#[error("Serialization error: {reason}")]
SerializationError { reason: String },
#[error("Permission error for extension '{extension_id}': {reason} (operation: {}, resource: {})",
operation.as_deref().unwrap_or("unknown"),
resource.as_deref().unwrap_or("unknown"))]
PermissionError {
extension_id: String,
operation: Option<String>,
resource: Option<String>,
reason: String,
},
#[error("Query error: {reason}")]
QueryError { reason: String },
#[error("Row processing error: {reason}")]
RowProcessingError { reason: String },
#[error("Mutex Poisoned error: {reason}")]
MutexPoisoned { reason: String },
#[error("Datenbankverbindung fehlgeschlagen für Pfad '{path}': {reason}")]
ConnectionFailed { path: String, reason: String },
#[error("PRAGMA-Befehl '{pragma}' konnte nicht gesetzt werden: {reason}")]
PragmaError { pragma: String, reason: String },
#[error("Fehler beim Auflösen des Dateipfads: {reason}")]
PathResolutionError { reason: String },
#[error("Datei-I/O-Fehler für Pfad '{path}': {reason}")]
IoError { path: String, reason: String },
#[error("CRDT setup failed: {0}")]
CrdtSetup(String),
}
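// With `#[serde(tag = "type", content = "details")]`, a struct variant serializes in the
// adjacently tagged form the generated TypeScript bindings expect; roughly (sketch):
//
//   {"type":"ParseError","details":{"reason":"Expected ...","sql":"SELEC * FROM users"}}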
impl From<rusqlite::Error> for DatabaseError {
fn from(err: rusqlite::Error) -> Self {
DatabaseError::DatabaseError {
reason: err.to_string(),
}
}
}
impl From<String> for DatabaseError {
fn from(reason: String) -> Self {
DatabaseError::DatabaseError { reason }
}
}
impl From<CrdtSetupError> for DatabaseError {
fn from(err: CrdtSetupError) -> Self {
        // We convert the error into a String to keep things simple.
DatabaseError::CrdtSetup(err.to_string())
}
}
impl From<crate::extension::database::ExtensionDatabaseError> for DatabaseError {
fn from(err: crate::extension::database::ExtensionDatabaseError) -> Self {
match err {
crate::extension::database::ExtensionDatabaseError::Permission { source } => {
                // Convert PermissionError to DatabaseError
match source {
crate::extension::database::permissions::PermissionError::AccessDenied {
extension_id,
operation,
resource,
reason,
} => DatabaseError::PermissionError {
extension_id,
operation: Some(operation),
resource: Some(resource),
reason,
},
crate::extension::database::permissions::PermissionError::Database {
source,
} => source,
other => DatabaseError::PermissionError {
extension_id: "unknown".to_string(),
operation: None,
resource: None,
reason: other.to_string(),
},
}
}
crate::extension::database::ExtensionDatabaseError::Database { source } => source,
crate::extension::database::ExtensionDatabaseError::ParameterValidation { reason } => {
DatabaseError::ParamError {
reason: reason.clone(),
                    expected: 0, // cannot be extracted from the reason
                    provided: 0, // cannot be extracted from the reason
}
}
crate::extension::database::ExtensionDatabaseError::StatementExecution { reason } => {
DatabaseError::ExecutionError {
sql: "unknown".to_string(),
reason,
table: None,
}
}
}
}
}

View File

@ -1,48 +1,41 @@
// database/mod.rs
// src-tauri/src/database/mod.rs
pub mod core;
pub mod error;
use rusqlite::Connection;
use serde_json::Value as JsonValue;
use std::fs;
use std::collections::HashMap;
use std::path::Path;
use std::str::FromStr;
use std::sync::{Arc, Mutex};
use std::sync::Mutex;
use std::{fs, sync::Arc};
use tauri::{path::BaseDirectory, AppHandle, Manager, State};
use crate::crdt::trigger;
use crate::database::core::open_and_init_db;
pub struct HlcService(pub Mutex<uhlc::HLC>);
use crate::crdt::hlc::HlcService;
use crate::database::error::DatabaseError;
pub struct DbConnection(pub Arc<Mutex<Option<Connection>>>);
#[tauri::command]
pub async fn sql_select(
sql: String,
params: Vec<JsonValue>,
state: State<'_, DbConnection>,
) -> Result<Vec<Vec<JsonValue>>, String> {
core::select(sql, params, &state).await
pub struct AppState {
pub db: DbConnection,
    pub hlc: Mutex<HlcService>, // No Arc needed here, since Tauri already manages the whole AppState inside an Arc.
}
#[tauri::command]
pub async fn sql_execute(
pub fn sql_select(
sql: String,
params: Vec<JsonValue>,
state: State<'_, DbConnection>,
) -> Result<usize, String> {
core::execute(sql, params, &state).await
state: State<'_, AppState>,
) -> Result<Vec<HashMap<String, JsonValue>>, DatabaseError> {
core::select(sql, params, &state.db)
}
#[tauri::command]
pub fn test(app_handle: AppHandle) -> Result<String, String> {
let resource_path = app_handle
.path()
.resolve("database/vault.db", BaseDirectory::Resource)
.map_err(|e| format!("Fehler {}", e));
//let file = app_handle.fs().open(resource_path, {}).unwrap().read();
Ok(String::from(resource_path.unwrap().to_string_lossy()))
/* std::fs::exists(String::from(resource_path.unwrap().to_string_lossy()))
.map_err(|e| format!("Fehler: {}", e)) */
pub fn sql_execute(
sql: String,
params: Vec<JsonValue>,
state: State<'_, AppState>,
) -> Result<usize, DatabaseError> {
core::execute(sql, params, &state.db)
}
#[tauri::command]
@ -50,8 +43,8 @@ pub fn create_encrypted_database(
app_handle: AppHandle,
path: String,
key: String,
state: State<'_, DbConnection>,
) -> Result<String, String> {
state: State<'_, AppState>,
) -> Result<String, DatabaseError> {
    // Resolve the resource path to the bundled database
    println!("Working directory: {:?}", std::env::current_dir());
@ -68,14 +61,16 @@ pub fn create_encrypted_database(
let resource_path = app_handle
.path()
.resolve("temp_vault.db", BaseDirectory::AppLocalData)
.map_err(|e| format!("Fehler beim Auflösen des Ressourcenpfads: {}", e))?;
.map_err(|e| DatabaseError::PathResolutionError {
reason: e.to_string(),
})?;
    // Check whether the resource file exists
if !resource_path.exists() {
return Err(format!(
"Ressourcendatenbank wurde nicht gefunden: {}",
resource_path.display()
));
return Err(DatabaseError::IoError {
path: resource_path.display().to_string(),
reason: "Ressourcendatenbank wurde nicht gefunden.".to_string(),
});
}
    // Make sure the target directory exists
@ -92,15 +87,10 @@ pub fn create_encrypted_database(
let target = Path::new(&path);
if target.exists() & target.is_file() {
println!(
"Datei '{}' existiert bereits. Sie wird gelöscht.",
target.display()
);
fs::remove_file(target)
.map_err(|e| format!("Kann Vault {} nicht löschen. \n {}", target.display(), e))?;
} else {
println!("Datei '{}' existiert nicht.", target.display());
fs::remove_file(target).map_err(|e| DatabaseError::IoError {
path: target.display().to_string(),
reason: format!("Bestehende Zieldatei konnte nicht gelöscht werden: {}", e),
})?;
}
println!(
@ -108,37 +98,43 @@ pub fn create_encrypted_database(
resource_path.as_path().display()
);
let conn = Connection::open(&resource_path).map_err(|e| {
format!(
"Fehler beim Öffnen der kopierten Datenbank: {}",
e.to_string()
)
let conn = Connection::open(&resource_path).map_err(|e| DatabaseError::ConnectionFailed {
path: resource_path.display().to_string(),
reason: format!(
"Fehler beim Öffnen der unverschlüsselten Quelldatenbank: {}",
e
),
})?;
println!("Hänge neue, verschlüsselte Datenbank an unter '{}'", &path);
// ATTACH DATABASE 'Dateiname' AS Alias KEY 'Passwort';
conn.execute("ATTACH DATABASE ?1 AS encrypted KEY ?2;", [&path, &key])
.map_err(|e| format!("Fehler bei ATTACH DATABASE: {}", e.to_string()))?;
.map_err(|e| DatabaseError::ExecutionError {
sql: "ATTACH DATABASE ...".to_string(),
reason: e.to_string(),
table: None,
})?;
println!(
"Exportiere Daten von 'main' nach 'encrypted' mit password {} ...",
&key
);
match conn.query_row("SELECT sqlcipher_export('encrypted');", [], |_row| Ok(())) {
Ok(_) => {
println!(">>> sqlcipher_export erfolgreich ausgeführt (Rückgabewert ignoriert).");
}
Err(e) => {
eprintln!("!!! FEHLER während sqlcipher_export: {}", e);
conn.execute("DETACH DATABASE encrypted;", []).ok(); // Versuche zu detachen
return Err(e.to_string()); // Gib den Fehler zurück
}
if let Err(e) = conn.query_row("SELECT sqlcipher_export('encrypted');", [], |_| Ok(())) {
        // Try to clean up, ignoring any errors while doing so
let _ = conn.execute("DETACH DATABASE encrypted;", []);
return Err(DatabaseError::QueryError {
reason: format!("Fehler während sqlcipher_export: {}", e),
});
}
println!("Löse die verschlüsselte Datenbank vom Handle...");
conn.execute("DETACH DATABASE encrypted;", [])
.map_err(|e| format!("Fehler bei DETACH DATABASE: {}", e.to_string()))?;
.map_err(|e| DatabaseError::ExecutionError {
sql: "DETACH DATABASE ...".to_string(),
reason: e.to_string(),
table: None,
})?;
println!("Datenbank erfolgreich nach '{}' verschlüsselt.", &path);
println!(
@ -164,17 +160,19 @@ pub fn create_encrypted_database(
println!("resource_path: {}", resource_path.display());
    // create triggers for haex_tables
conn.close()
.map_err(|(_, e)| DatabaseError::ConnectionFailed {
path: resource_path.display().to_string(),
reason: format!("Fehler beim Schließen der Quelldatenbank: {}", e),
})?;
conn.close().unwrap();
let new_conn = open_and_init_db(&path, &key, false)?;
let new_conn = core::open_and_init_db(&path, &key, false)?;
    // Update the database connection in the state
let mut db = state
.0
.lock()
.map_err(|e| format!("Mutex-Fehler: {}", e.to_string()))?;
let mut db = state.db.0.lock().map_err(|e| DatabaseError::LockError {
reason: e.to_string(),
})?;
*db = Some(new_conn);
Ok(format!("Verschlüsselte CRDT-Datenbank erstellt",))
@ -182,11 +180,11 @@ pub fn create_encrypted_database(
#[tauri::command]
pub fn open_encrypted_database(
app_handle: AppHandle,
//app_handle: AppHandle,
path: String,
key: String,
state: State<'_, DbConnection>,
) -> Result<String, String> {
state: State<'_, AppState>,
) -> Result<String, DatabaseError> {
/* let vault_path = app_handle
.path()
.resolve(format!("vaults/{}", path), BaseDirectory::AppLocalData)
@ -194,92 +192,16 @@ pub fn open_encrypted_database(
.into_os_string()
.into_string()
.unwrap(); */
if !std::path::Path::new(&path).exists() {
/* if !std::path::Path::new(&path).exists() {
return Err(format!("File not found {}", path).into());
}
} */
let conn =
core::open_and_init_db(&path, &key, false).map_err(|e| format!("Error during open: {}", e));
let conn = core::open_and_init_db(&path, &key, false)
.map_err(|e| format!("Error during open: {}", e))?;
let mut db = state.0.lock().map_err(|e| e.to_string())?;
let mut db = state.db.0.lock().map_err(|e| e.to_string())?;
*db = Some(conn.unwrap());
*db = Some(conn);
Ok(format!("success"))
}
fn get_target_triple() -> Result<String, String> {
let target_triple = if cfg!(target_os = "linux") {
if cfg!(target_arch = "x86_64") {
"x86_64-unknown-linux-gnu".to_string()
} else if cfg!(target_arch = "aarch64") {
"aarch64-unknown-linux-gnu".to_string()
} else {
return Err(format!(
"Unbekannte Linux-Architektur: {}",
std::env::consts::ARCH
));
}
} else if cfg!(target_os = "macos") {
if cfg!(target_arch = "x86_64") {
"x86_64-apple-darwin".to_string()
} else if cfg!(target_arch = "aarch64") {
"aarch64-apple-darwin".to_string()
} else {
return Err(format!(
"Unbekannte macOS-Architektur: {}",
std::env::consts::ARCH
));
}
} else if cfg!(target_os = "windows") {
if cfg!(target_arch = "x86_64") {
"x86_64-pc-windows-msvc".to_string()
} else if cfg!(target_arch = "x86") {
"i686-pc-windows-msvc".to_string()
} else {
return Err(format!(
"Unbekannte Windows-Architektur: {}",
std::env::consts::ARCH
));
}
} else if cfg!(target_os = "android") {
if cfg!(target_arch = "aarch64") {
"aarch64-linux-android".to_string()
} else {
return Err(format!(
"Unbekannte Android-Architektur: {}",
std::env::consts::ARCH
));
}
} else if cfg!(target_os = "ios") {
if cfg!(target_arch = "aarch64") {
"aarch64-apple-ios".to_string()
} else {
return Err(format!(
"Unbekannte iOS-Architektur: {}",
std::env::consts::ARCH
));
}
} else {
return Err("Unbekanntes Zielsystem".to_string());
};
Ok(target_triple)
}
pub fn get_hlc_timestamp(state: tauri::State<HlcService>) -> String {
let hlc = state.0.lock().unwrap();
hlc.new_timestamp().to_string()
}
#[tauri::command]
pub fn update_hlc_from_remote(
remote_timestamp_str: String,
state: tauri::State<HlcService>,
) -> Result<(), String> {
let remote_ts =
uhlc::Timestamp::from_str(&remote_timestamp_str).map_err(|e| e.cause.to_string())?;
let hlc = state.0.lock().unwrap();
hlc.update_with_timestamp(&remote_ts)
.map_err(|e| format!("HLC update failed: {:?}", e))
}

View File

@ -1,48 +1,374 @@
mod permissions;
// src-tauri/src/extension/database/mod.rs
use crate::database;
use crate::database::DbConnection;
use crate::models::ExtensionState;
pub mod permissions;
use crate::crdt::hlc::HlcService;
use crate::crdt::transformer::CrdtTransformer;
use crate::crdt::trigger;
use crate::database::core::{parse_sql_statements, with_connection, ValueConverter};
use crate::database::error::DatabaseError;
use crate::database::AppState;
use permissions::{check_read_permission, check_write_permission, PermissionError};
use rusqlite::params_from_iter;
use rusqlite::types::Value as SqlValue;
use rusqlite::Transaction;
use serde_json::json;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use tauri::{AppHandle, State};
// Extension-related functions with the extension_ prefix
/// Loads an extension from a manifest file
/* #[tauri::command]
pub fn extension_load(
manifest_path: String,
app: AppHandle,
) -> Result<crate::models::ExtensionManifest, String> {
let manifest_content = std::fs::read_to_string(&manifest_path).map_err(|e| e.to_string())?;
let manifest: crate::models::ExtensionManifest =
serde_json::from_str(&manifest_content).map_err(|e| e.to_string())?;
app.state::<ExtensionState>()
.add_extension(manifest_path.clone(), manifest.clone());
Ok(manifest)
}
*/
/// Executes SQL read operations with permission checks
#[tauri::command]
pub async fn extension_sql_select(
app: AppHandle,
extension_id: String,
sql: String,
params: Vec<JsonValue>,
state: State<'_, DbConnection>,
) -> Result<Vec<Vec<JsonValue>>, String> {
permissions::check_read_permission(&app, &extension_id, &sql).await?;
database::core::select(sql, params, &state).await
use sqlparser::ast::{Statement, TableFactor, TableObject};
use std::collections::HashSet;
use tauri::State;
use thiserror::Error;
/// Combined error type for extension database operations
#[derive(Error, Debug)]
pub enum ExtensionDatabaseError {
#[error("Permission denied: {source}")]
Permission {
#[from]
source: PermissionError,
},
#[error("Database error: {source}")]
Database {
#[from]
source: DatabaseError,
},
#[error("Parameter validation failed: {reason}")]
ParameterValidation { reason: String },
#[error("Statement execution failed: {reason}")]
StatementExecution { reason: String },
}
// For Tauri command serialization
impl serde::Serialize for ExtensionDatabaseError {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
/// Executes statements with correct parameter binding
pub struct StatementExecutor<'a> {
transaction: &'a Transaction<'a>,
hlc_service: &'a HlcService,
}
impl<'a> StatementExecutor<'a> {
fn new(transaction: &'a Transaction<'a>, hlc_service: &'a HlcService) -> Self {
Self {
transaction,
hlc_service,
}
}
    /// Executes a single statement with parameters
fn execute_statement_with_params(
&self,
statement: &Statement,
params: &[SqlValue],
) -> Result<(), ExtensionDatabaseError> {
let sql = statement.to_string();
let expected_params = count_sql_placeholders(&sql);
if expected_params != params.len() {
return Err(ExtensionDatabaseError::ParameterValidation {
reason: format!(
"Parameter count mismatch for statement: {} (expected: {}, provided: {})",
truncate_sql(&sql, 100),
expected_params,
params.len()
),
});
}
self.transaction
.execute(&sql, params_from_iter(params.iter()))
.map_err(|e| ExtensionDatabaseError::StatementExecution {
reason: format!(
"Failed to execute statement on table {}: {}",
self.extract_table_name_from_statement(statement)
.unwrap_or_else(|| "unknown".to_string()),
e
),
})?;
Ok(())
}
    /// Extracts the table name from a statement for better error messages
fn extract_table_name_from_statement(&self, statement: &Statement) -> Option<String> {
match statement {
Statement::Insert(insert) => {
if let TableObject::TableName(name) = &insert.table {
Some(name.to_string())
} else {
None
}
}
Statement::Update { table, .. } => {
if let TableFactor::Table { name, .. } = &table.relation {
Some(name.to_string())
} else {
None
}
}
Statement::Delete(delete) => {
            // improved extraction for DELETE
use sqlparser::ast::FromTable;
match &delete.from {
FromTable::WithFromKeyword(tables) | FromTable::WithoutKeyword(tables) => {
if !tables.is_empty() {
if let TableFactor::Table { name, .. } = &tables[0].relation {
Some(name.to_string())
} else {
None
}
} else if !delete.tables.is_empty() {
Some(delete.tables[0].to_string())
} else {
None
}
}
}
}
Statement::CreateTable(create) => Some(create.name.to_string()),
Statement::AlterTable { name, .. } => Some(name.to_string()),
Statement::Drop { names, .. } => names.first().map(|name| name.to_string()),
_ => None,
}
}
}
/// Executes SQL write operations with permission checks
#[tauri::command]
pub async fn extension_sql_execute(
app: AppHandle,
extension_id: String,
sql: String,
sql: &str,
params: Vec<JsonValue>,
state: State<'_, DbConnection>,
) -> Result<usize, String> {
permissions::check_write_permission(&app, &extension_id, &sql).await?;
database::core::execute(sql, params, &state).await
extension_id: String,
state: State<'_, AppState>,
hlc_service: State<'_, HlcService>,
) -> Result<Vec<String>, ExtensionDatabaseError> {
// Permission check
check_write_permission(&state.db, &extension_id, sql).await?;
// Parameter validation
validate_params(sql, &params)?;
// SQL parsing
let mut ast_vec = parse_sql_statements(sql)?;
// Database operation
with_connection(&state.db, |conn| {
let tx = conn.transaction().map_err(DatabaseError::from)?;
let transformer = CrdtTransformer::new();
let executor = StatementExecutor::new(&tx, &hlc_service);
// Generate HLC timestamp
let hlc_timestamp =
hlc_service
.new_timestamp_and_persist(&tx)
.map_err(|e| DatabaseError::HlcError {
reason: e.to_string(),
})?;
// Transform statements
let mut modified_schema_tables = HashSet::new();
for statement in &mut ast_vec {
if let Some(table_name) =
transformer.transform_execute_statement(statement, &hlc_timestamp)?
{
modified_schema_tables.insert(table_name);
}
}
// Convert parameters
let sql_values = ValueConverter::convert_params(&params)?;
// Execute statements
for statement in ast_vec {
executor.execute_statement_with_params(&statement, &sql_values)?;
if let Statement::CreateTable(create_table_details) = statement {
let table_name_str = create_table_details.name.to_string();
println!(
"Table '{}' created by extension, setting up CRDT triggers...",
table_name_str
);
trigger::setup_triggers_for_table(&tx, &table_name_str, false)?;
println!(
"Triggers for table '{}' successfully created.",
table_name_str
);
}
}
// Commit transaction
tx.commit().map_err(DatabaseError::from)?;
Ok(modified_schema_tables.into_iter().collect())
})
.map_err(ExtensionDatabaseError::from)
}
#[tauri::command]
pub async fn extension_sql_select(
sql: &str,
params: Vec<JsonValue>,
extension_id: String,
state: State<'_, AppState>,
) -> Result<Vec<JsonValue>, ExtensionDatabaseError> {
// Permission check
check_read_permission(&state.db, &extension_id, sql).await?;
// Parameter validation
validate_params(sql, &params)?;
// SQL parsing
let mut ast_vec = parse_sql_statements(sql)?;
if ast_vec.is_empty() {
return Ok(vec![]);
}
// Validate that all statements are queries
for stmt in &ast_vec {
if !matches!(stmt, Statement::Query(_)) {
return Err(ExtensionDatabaseError::StatementExecution {
reason: "Only SELECT statements are allowed in extension_sql_select".to_string(),
});
}
}
// Database operation
with_connection(&state.db, |conn| {
let sql_params = ValueConverter::convert_params(&params)?;
let transformer = CrdtTransformer::new();
// Use the last statement for result set
let last_statement = ast_vec.pop().unwrap();
let mut stmt_to_execute = last_statement;
// Transform the statement
transformer.transform_select_statement(&mut stmt_to_execute)?;
let transformed_sql = stmt_to_execute.to_string();
// Prepare and execute query
let mut prepared_stmt =
conn.prepare(&transformed_sql)
.map_err(|e| DatabaseError::ExecutionError {
sql: transformed_sql.clone(),
reason: e.to_string(),
table: None,
})?;
let column_names: Vec<String> = prepared_stmt
.column_names()
.into_iter()
.map(|s| s.to_string())
.collect();
let rows = prepared_stmt
.query_map(params_from_iter(sql_params.iter()), |row| {
row_to_json_value(row, &column_names)
})
.map_err(|e| DatabaseError::QueryError {
reason: e.to_string(),
})?;
let mut results = Vec::new();
for row_result in rows {
results.push(row_result.map_err(|e| DatabaseError::RowProcessingError {
reason: e.to_string(),
})?);
}
Ok(results)
})
.map_err(ExtensionDatabaseError::from)
}
/// Converts a SQLite row to JSON
fn row_to_json_value(
row: &rusqlite::Row,
columns: &[String],
) -> Result<JsonValue, rusqlite::Error> {
let mut map = serde_json::Map::new();
for (i, col_name) in columns.iter().enumerate() {
let value = row.get::<usize, rusqlite::types::Value>(i)?;
let json_value = match value {
rusqlite::types::Value::Null => JsonValue::Null,
rusqlite::types::Value::Integer(i) => json!(i),
rusqlite::types::Value::Real(f) => json!(f),
rusqlite::types::Value::Text(s) => json!(s),
rusqlite::types::Value::Blob(blob) => json!(blob.to_vec()),
};
map.insert(col_name.clone(), json_value);
}
Ok(JsonValue::Object(map))
}
/// Validates parameters against SQL placeholders
fn validate_params(sql: &str, params: &[JsonValue]) -> Result<(), ExtensionDatabaseError> {
let total_placeholders = count_sql_placeholders(sql);
if total_placeholders != params.len() {
return Err(ExtensionDatabaseError::ParameterValidation {
reason: format!(
"Parameter count mismatch: SQL has {} placeholders but {} parameters provided",
total_placeholders,
params.len()
),
});
}
Ok(())
}
/// Counts SQL placeholders (improved version)
fn count_sql_placeholders(sql: &str) -> usize {
sql.matches('?').count()
}
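// Note: this simply counts '?' characters, so a literal question mark inside a quoted
// string (e.g. WHERE title = 'why?') is counted as a placeholder too.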
/// Truncates SQL for error messages
fn truncate_sql(sql: &str, max_length: usize) -> String {
if sql.len() <= max_length {
sql.to_string()
} else {
format!("{}...", &sql[..max_length])
}
}
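// Caveat: `&sql[..max_length]` slices by byte index and would panic if the cut falls inside
// a multi-byte UTF-8 character; fine for ASCII SQL, worth keeping in mind otherwise.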
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_count_sql_placeholders() {
assert_eq!(
count_sql_placeholders("SELECT * FROM users WHERE id = ?"),
1
);
assert_eq!(
count_sql_placeholders("SELECT * FROM users WHERE id = ? AND name = ?"),
2
);
assert_eq!(count_sql_placeholders("SELECT * FROM users"), 0);
}
#[test]
fn test_truncate_sql() {
let sql = "SELECT * FROM very_long_table_name";
assert_eq!(truncate_sql(sql, 10), "SELECT * F...");
assert_eq!(truncate_sql(sql, 50), sql);
}
#[test]
fn test_validate_params() {
let params = vec![json!(1), json!("test")];
assert!(validate_params("SELECT * FROM users WHERE id = ? AND name = ?", &params).is_ok());
assert!(validate_params("SELECT * FROM users WHERE id = ?", &params).is_err());
assert!(validate_params("SELECT * FROM users", &params).is_err());
}
}

View File

@ -1,160 +1,206 @@
// database/permissions.rs
use crate::database::core::extract_tables_from_query;
// src-tauri/src/extension/database/permissions.rs
use crate::database::core::{
extract_table_names_from_sql, parse_single_statement, with_connection,
};
use crate::database::error::DatabaseError;
use crate::database::DbConnection;
use crate::models::DbExtensionPermission;
use sqlparser::dialect::SQLiteDialect;
use sqlparser::parser::Parser;
use tauri::{AppHandle, Manager};
use serde::{Deserialize, Serialize};
use sqlparser::ast::{Statement, TableFactor, TableObject};
use thiserror::Error;
/// Checks read permissions for an extension based on database entries
pub async fn check_read_permission(
app: &AppHandle,
extension_id: &str,
sql: &str,
) -> Result<(), String> {
    // Parse the SQL statement
let dialect = SQLiteDialect {};
let statements = Parser::parse_sql(&dialect, sql).map_err(|e| e.to_string())?;
let statement = statements
.into_iter()
.next()
.ok_or("Keine SQL-Anweisung gefunden")?;
#[derive(Error, Debug, Serialize, Deserialize)]
pub enum PermissionError {
#[error("Extension '{extension_id}' has no {operation} permission for {resource}: {reason}")]
AccessDenied {
extension_id: String,
operation: String,
resource: String,
reason: String,
},
#[error("Database error while checking permissions: {source}")]
Database {
#[from]
source: DatabaseError,
},
#[error("SQL parsing error: {reason}")]
SqlParse { reason: String },
#[error("Invalid SQL statement: {reason}")]
InvalidStatement { reason: String },
#[error("No SQL statement found")]
NoStatement,
#[error("Unsupported statement type for permission check")]
UnsupportedStatement,
#[error("No table specified in {statement_type} statement")]
NoTableSpecified { statement_type: String },
}
    // Permission check for SELECT statements
if let sqlparser::ast::Statement::Query(query) = statement {
let tables = extract_tables_from_query(&query);
        // Fetch permissions from the database
let db_state = app.state::<DbConnection>();
let permissions =
get_extension_permissions(db_state, extension_id, "database", "read").await?;
        // Check that all required tables are covered by the permissions
for table in tables {
let has_permission = permissions.iter().any(|perm| perm.path.contains(&table));
if !has_permission {
return Err(format!("Keine Leseberechtigung für Tabelle {}", table));
}
// Helper for better readability
impl PermissionError {
pub fn access_denied(
extension_id: &str,
operation: &str,
resource: &str,
reason: &str,
) -> Self {
Self::AccessDenied {
extension_id: extension_id.to_string(),
operation: operation.to_string(),
resource: resource.to_string(),
reason: reason.to_string(),
}
Ok(())
} else {
Err("Nur SELECT-Anweisungen erlaubt".into())
}
}
/// Checks write permissions for an extension based on database entries
pub async fn check_write_permission(
app: &AppHandle,
/// Checks read permissions for an extension
pub async fn check_read_permission(
connection: &DbConnection,
extension_id: &str,
sql: &str,
) -> Result<(), String> {
    // Parse the SQL statement
let dialect = SQLiteDialect {};
let statements = Parser::parse_sql(&dialect, sql).map_err(|e| e.to_string())?;
let statement = statements
.into_iter()
.next()
.ok_or("Keine SQL-Anweisung gefunden")?;
) -> Result<(), PermissionError> {
let statement = parse_single_statement(sql).map_err(|e| PermissionError::SqlParse {
reason: e.to_string(),
})?;
    // Permission check based on statement type
match statement {
sqlparser::ast::Statement::Insert(insert) => {
let table_name = match insert.table {
sqlparser::ast::TableObject::TableName(name) => name.to_string(),
_ => return Err("Ungültige Tabellenangabe in INSERT".into()),
};
            // Fetch permissions from the database
let db_state = app.state::<DbConnection>();
let permissions =
get_extension_permissions(db_state, extension_id, "database", "write").await?;
            // Check whether the table is covered by the permissions
let has_permission = permissions
.iter()
.any(|perm| perm.path.contains(&table_name));
if !has_permission {
return Err(format!(
"Keine Schreibberechtigung für Tabelle {}",
table_name
));
}
Statement::Query(query) => {
let tables = extract_table_names_from_sql(&query.to_string())?;
check_table_permissions(connection, extension_id, &tables, "read").await
}
sqlparser::ast::Statement::Update { table, .. } => {
let table_name = table.relation.to_string();
_ => Err(PermissionError::InvalidStatement {
reason: "Only SELECT statements are allowed for read operations".to_string(),
}),
}
}
            // Fetch permissions from the database
let db_state = app.state::<DbConnection>();
let permissions =
get_extension_permissions(db_state, extension_id, "database", "write").await?;
/// Checks write permissions for an extension
pub async fn check_write_permission(
connection: &DbConnection,
extension_id: &str,
sql: &str,
) -> Result<(), PermissionError> {
let statement = parse_single_statement(sql).map_err(|e| PermissionError::SqlParse {
reason: e.to_string(),
})?;
    // Check whether the table is covered by the permissions
let has_permission = permissions
.iter()
.any(|perm| perm.path.contains(&table_name));
if !has_permission {
return Err(format!(
"Keine Schreibberechtigung für Tabelle {}",
table_name
));
}
match statement {
Statement::Insert(insert) => {
let table_name = extract_table_name_from_insert(&insert)?;
check_single_table_permission(connection, extension_id, &table_name, "write").await
}
sqlparser::ast::Statement::Delete(delete) => {
let from_tables = match delete.from {
sqlparser::ast::FromTable::WithFromKeyword(tables) => tables,
sqlparser::ast::FromTable::WithoutKeyword(tables) => tables,
};
if from_tables.is_empty() && delete.tables.is_empty() {
return Err("Keine Tabelle in DELETE angegeben".into());
}
let table_name = if !from_tables.is_empty() {
from_tables[0].relation.to_string()
} else {
delete.tables[0].to_string()
};
            // Fetch permissions from the database
let db_state = app.state::<DbConnection>();
let permissions =
get_extension_permissions(db_state, extension_id, "database", "write").await?;
            // Check whether the table is covered by the permissions
let has_permission = permissions
.iter()
.any(|perm| perm.path.contains(&table_name));
if !has_permission {
return Err(format!(
"Keine Schreibberechtigung für Tabelle {}",
table_name
));
}
Statement::Update { table, .. } => {
let table_name = extract_table_name_from_table_factor(&table.relation)?;
check_single_table_permission(connection, extension_id, &table_name, "write").await
}
sqlparser::ast::Statement::CreateTable(create_table) => {
Statement::Delete(delete) => {
            // DELETE is turned into an UPDATE with tombstone = 1 by the CRDT transform
let table_name = extract_table_name_from_delete(&delete)?;
check_single_table_permission(connection, extension_id, &table_name, "write").await
}
Statement::CreateTable(create_table) => {
let table_name = create_table.name.to_string();
check_single_table_permission(connection, extension_id, &table_name, "create").await
}
Statement::AlterTable { name, .. } => {
let table_name = name.to_string();
check_single_table_permission(connection, extension_id, &table_name, "alter").await
}
Statement::Drop { names, .. } => {
            // DROP can reference several tables
let table_names: Vec<String> = names.iter().map(|name| name.to_string()).collect();
check_table_permissions(connection, extension_id, &table_names, "drop").await
}
_ => Err(PermissionError::UnsupportedStatement),
}
}
            // Fetch permissions from the database
let db_state = app.state::<DbConnection>();
let permissions =
get_extension_permissions(db_state, extension_id, "database", "create").await?;
/// Extracts the table name from an INSERT statement
fn extract_table_name_from_insert(
insert: &sqlparser::ast::Insert,
) -> Result<String, PermissionError> {
match &insert.table {
TableObject::TableName(name) => Ok(name.to_string()),
_ => Err(PermissionError::NoTableSpecified {
statement_type: "INSERT".to_string(),
}),
}
}
    // Check whether the table is covered by the permissions
let has_permission = permissions
.iter()
.any(|perm| perm.path.contains(&table_name));
/// Extracts the table name from a TableFactor
fn extract_table_name_from_table_factor(
table_factor: &TableFactor,
) -> Result<String, PermissionError> {
match table_factor {
TableFactor::Table { name, .. } => Ok(name.to_string()),
_ => Err(PermissionError::InvalidStatement {
reason: "Complex table references not supported".to_string(),
}),
}
}
if !has_permission {
return Err(format!(
"Keine Erstellungsberechtigung für Tabelle {}",
table_name
));
/// Extracts the table name from a DELETE statement
fn extract_table_name_from_delete(
delete: &sqlparser::ast::Delete,
) -> Result<String, PermissionError> {
use sqlparser::ast::FromTable;
let table_name = match &delete.from {
FromTable::WithFromKeyword(tables) | FromTable::WithoutKeyword(tables) => {
if !tables.is_empty() {
extract_table_name_from_table_factor(&tables[0].relation)?
} else if !delete.tables.is_empty() {
delete.tables[0].to_string()
} else {
return Err(PermissionError::NoTableSpecified {
statement_type: "DELETE".to_string(),
});
}
}
_ => return Err("Nur Schreiboperationen erlaubt (nutze 'select' für Abfragen)".into()),
};
Ok(table_name)
}
/// Checks permission for a single table
async fn check_single_table_permission(
connection: &DbConnection,
extension_id: &str,
table_name: &str,
operation: &str,
) -> Result<(), PermissionError> {
check_table_permissions(
connection,
extension_id,
&[table_name.to_string()],
operation,
)
.await
}
/// Checks permissions for multiple tables
async fn check_table_permissions(
connection: &DbConnection,
extension_id: &str,
table_names: &[String],
operation: &str,
) -> Result<(), PermissionError> {
let permissions =
get_extension_permissions(connection, extension_id, "database", operation).await?;
for table_name in table_names {
let has_permission = permissions
.iter()
.any(|perm| perm.path.contains(table_name));
if !has_permission {
return Err(PermissionError::access_denied(
extension_id,
operation,
&format!("table '{}'", table_name),
"Table not in permitted resources",
));
}
}
Ok(())
@ -162,42 +208,98 @@ pub async fn check_write_permission(
/// Retrieves an extension's permissions from the database
async fn get_extension_permissions(
db_state: tauri::State<'_, DbConnection>,
connection: &DbConnection,
extension_id: &str,
resource: &str,
operation: &str,
) -> Result<Vec<DbExtensionPermission>, String> {
let db = db_state
.0
.lock()
.map_err(|e| format!("Mutex-Fehler: {}", e))?;
) -> Result<Vec<DbExtensionPermission>, DatabaseError> {
with_connection(connection, |conn| {
let mut stmt = conn
.prepare(
"SELECT id, extension_id, resource, operation, path
FROM haex_vault_extension_permissions
WHERE extension_id = ?1 AND resource = ?2 AND operation = ?3",
)
.map_err(|e| DatabaseError::PrepareError {
reason: e.to_string(),
})?;
let conn = db.as_ref().ok_or("No database connection available")?;
let mut stmt = conn
.prepare(
"SELECT id, extension_id, resource, operation, path
FROM haex_vault_extension_permissions
WHERE extension_id = ? AND resource = ? AND operation = ?",
)
.map_err(|e| format!("SQL-Vorbereitungsfehler: {}", e))?;
let rows = stmt
.query_map(&[extension_id, resource, operation], |row| {
Ok(DbExtensionPermission {
id: row.get(0)?,
extension_id: row.get(1)?,
resource: row.get(2)?,
operation: row.get(3)?,
path: row.get(4)?,
let rows = stmt
.query_map([extension_id, resource, operation], |row| {
Ok(DbExtensionPermission {
id: row.get(0)?,
extension_id: row.get(1)?,
resource: row.get(2)?,
operation: row.get(3)?,
path: row.get(4)?,
})
})
})
.map_err(|e| format!("SQL-Abfragefehler: {}", e))?;
.map_err(|e| DatabaseError::QueryError {
reason: e.to_string(),
})?;
let mut permissions = Vec::new();
for row in rows {
permissions.push(row.map_err(|e| format!("Error reading permissions: {}", e))?);
let mut permissions = Vec::new();
for row_result in rows {
let permission = row_result.map_err(|e| DatabaseError::PermissionError {
extension_id: extension_id.to_string(),
operation: Some(operation.to_string()),
resource: Some(resource.to_string()),
reason: e.to_string(),
})?;
permissions.push(permission);
}
Ok(permissions)
})
}
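The SELECT above only fixes the column names; a minimal schema sketch for the permissions table could look like this (types and constraints are assumptions, not taken from this commit):

// Sketch only: plausible shape of haex_vault_extension_permissions.
// Only the five column names are grounded in the query above.
const CREATE_EXTENSION_PERMISSIONS_TABLE: &str = "
    CREATE TABLE IF NOT EXISTS haex_vault_extension_permissions (
        id           TEXT PRIMARY KEY,
        extension_id TEXT NOT NULL,
        resource     TEXT NOT NULL, -- e.g. 'database'
        operation    TEXT NOT NULL, -- e.g. 'read', 'insert', 'drop'
        path         TEXT NOT NULL  -- table name or path the grant applies to
    )";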
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_single_statement() {
let sql = "SELECT * FROM users";
let result = parse_single_statement(sql);
assert!(result.is_ok());
assert!(matches!(result.unwrap(), Statement::Query(_)));
}
Ok(permissions)
#[test]
fn test_parse_invalid_sql() {
let sql = "INVALID SQL";
let result = parse_single_statement(sql);
// parse_single_statement returns DatabaseError, not PermissionError
assert!(result.is_err());
// To be more specific, the DatabaseError variant can be inspected:
match result {
Err(DatabaseError::ParseError { .. }) => {
// Test passed - a ParseError was returned
}
Err(other) => {
// Other DatabaseError variants are also acceptable for invalid SQL
println!("Received other DatabaseError: {:?}", other);
}
Ok(_) => panic!("Expected error for invalid SQL"),
}
}
#[test]
fn test_permission_error_access_denied() {
let error = PermissionError::access_denied("ext1", "read", "table1", "not allowed");
match error {
PermissionError::AccessDenied {
extension_id,
operation,
resource,
reason,
} => {
assert_eq!(extension_id, "ext1");
assert_eq!(operation, "read");
assert_eq!(resource, "table1");
assert_eq!(reason, "not allowed");
}
_ => panic!("Expected AccessDenied error"),
}
}
}
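A further test in the same style could exercise the INSERT extractor; this is only a sketch and assumes the sqlparser release in use models INSERT as the Statement::Insert tuple variant.

// Sketch only: would live inside the tests module above.
#[test]
fn test_extract_table_name_from_insert() {
    let sql = "INSERT INTO users (id) VALUES (1)";
    let statement = parse_single_statement(sql).expect("valid INSERT should parse");
    match statement {
        Statement::Insert(insert) => {
            let table = extract_table_name_from_insert(&insert).expect("table name expected");
            assert_eq!(table, "users");
        }
        other => panic!("Expected INSERT statement, got {:?}", other),
    }
}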

View File

@@ -1,8 +1,2 @@
pub mod core;
pub mod database;
use tauri;
#[tauri::command]
pub async fn copy_directory(source: String, destination: String) -> Result<(), String> {
core::copy_directory(source, destination)
}

View File

@@ -1,25 +1,29 @@
//mod browser;
//mod android_storage;
pub mod crdt;
mod crdt;
mod database;
mod extension;
mod models;
pub mod table_names {
include!(concat!(env!("OUT_DIR"), "/tableNames.rs"));
}
use models::ExtensionState;
use rusqlite::{Connection, OpenFlags};
use std::sync::{Arc, Mutex};
use crate::database::DbConnection;
use crate::{
crdt::hlc::HlcService,
database::{AppState, DbConnection},
};
#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
let protocol_name = "haex-extension";
tauri::Builder::default()
.register_uri_scheme_protocol(protocol_name, move |context, request| {
/* .register_uri_scheme_protocol(protocol_name, move |context, request| {
match extension::core::extension_protocol_handler(&context, &request) {
Ok(response) => response, // If the handler succeeds, return the response directly
Err(e) => {
@@ -48,9 +52,13 @@ pub fn run() {
})
}
}
}) */
/* .manage(database::DbConnection(Arc::new(Mutex::new(None))))
.manage(crdt::hlc::HlcService::new()) */
.manage(AppState {
db: DbConnection(Arc::new(Mutex::new(None))),
hlc: Mutex::new(HlcService::new()), // Start with an uninitialized HLC
})
.manage(DbConnection(Arc::new(Mutex::new(None))))
.manage(database::HlcService(Mutex::new(uhlc::HLC::default())))
.manage(ExtensionState::default())
.plugin(tauri_plugin_dialog::init())
.plugin(tauri_plugin_fs::init())
@@ -60,20 +68,17 @@ pub fn run() {
.plugin(tauri_plugin_os::init())
.plugin(tauri_plugin_persisted_scope::init())
.plugin(tauri_plugin_store::Builder::new().build())
.plugin(tauri_plugin_android_fs::init())
//.plugin(tauri_plugin_sql::Builder::new().build())
//.plugin(tauri_plugin_android_fs::init())
.invoke_handler(tauri::generate_handler![
database::create_encrypted_database,
database::open_encrypted_database,
database::sql_execute,
database::sql_select,
database::test,
database::update_hlc_from_remote,
extension::copy_directory,
extension::database::extension_sql_select,
/* android_storage::request_storage_permission,
android_storage::has_storage_permission,
android_storage::get_external_storage_paths, */
extension::database::extension_sql_execute,
//database::update_hlc_from_remote,
/* extension::copy_directory,
extension::database::extension_sql_select, */
])
.run(tauri::generate_context!())
.expect("error while running tauri application");
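For reference, a command reading the managed state registered above could look roughly like this; the command name, return value, and locking strategy are assumptions, only AppState and DbConnection come from this commit.

// Sketch only: a hypothetical command checking whether a vault is open.
#[tauri::command]
async fn vault_is_open(state: tauri::State<'_, AppState>) -> Result<bool, String> {
    let guard = state
        .db
        .0
        .lock()
        .map_err(|e| format!("mutex poisoned: {}", e))?;
    Ok(guard.is_some())
}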