mirror of
https://github.com/haexhub/haex-hub.git
synced 2025-12-16 22:20:51 +01:00
zwischenstand
This commit is contained in:
94
src-tauri/src/android_storage/mod.rs
Normal file
94
src-tauri/src/android_storage/mod.rs
Normal file
@ -0,0 +1,94 @@
|
||||
#[cfg(target_os = "android")]
#[tauri::command]
/// Guides the user towards granting Android's "All files access" permission.
///
/// Currently a stub: it only returns an instruction string for the UI to
/// display. An earlier attempt (since removed) opened the
/// `android.settings.MANAGE_ALL_FILES_ACCESS_PERMISSION` intent via the opener
/// plugin, with the app-details settings page as a fallback.
///
/// Returns the instruction message; never errors in the current stub form.
pub async fn request_storage_permission(_app_handle: tauri::AppHandle) -> Result<String, String> {
    // TODO(review): actually launch the settings intent instead of only
    // telling the user what to do manually.
    Ok("Settings opened - Enable 'Allow management of all files'".to_string())
}
|
||||
|
||||
#[cfg(target_os = "android")]
#[tauri::command]
/// Probes whether the app can write to shared external storage.
///
/// Tries to create (and immediately delete) a small probe file in a set of
/// well-known external-storage directories; the first successful write means
/// the permission is effectively granted. Returns `Ok(false)` when no
/// candidate directory is writable.
pub async fn has_storage_permission() -> Result<bool, String> {
    use std::path::Path;

    // Candidate roots of the shared external storage, probed in order.
    let candidates = [
        "/storage/emulated/0/Android",
        "/sdcard/Android",
        "/storage/emulated/0",
    ];

    let writable = candidates
        .iter()
        .filter(|p| Path::new(p).exists())
        .any(|p| {
            let probe = format!("{}/haex_test.tmp", p);
            // A successful write proves access; clean the probe file up again.
            match std::fs::write(&probe, "test") {
                Ok(()) => {
                    let _ = std::fs::remove_file(&probe);
                    true
                }
                Err(_) => false,
            }
        });

    Ok(writable)
}
|
||||
|
||||
#[cfg(target_os = "android")]
#[tauri::command]
/// Returns the subset of well-known Android external-storage directories that
/// actually exist on this device, in a fixed priority order.
pub async fn get_external_storage_paths() -> Result<Vec<String>, String> {
    const CANDIDATES: [&str; 6] = [
        "/storage/emulated/0",
        "/sdcard",
        "/storage/emulated/0/Download",
        "/storage/emulated/0/Documents",
        "/storage/emulated/0/Pictures",
        "/storage/emulated/0/DCIM",
    ];

    // Keep only the candidates that are present on this device.
    let existing = CANDIDATES
        .iter()
        .filter(|p| std::path::Path::new(p).exists())
        .map(|p| p.to_string())
        .collect();

    Ok(existing)
}
|
||||
|
||||
#[cfg(not(target_os = "android"))]
|
||||
#[tauri::command]
|
||||
pub async fn request_storage_permission(_app: tauri::AppHandle) -> Result<String, String> {
|
||||
Ok("aaaaaaaa".to_string())
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "android"))]
|
||||
#[tauri::command]
|
||||
pub async fn has_storage_permission() -> Result<bool, String> {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "android"))]
|
||||
#[tauri::command]
|
||||
pub async fn get_external_storage_paths() -> Result<Vec<String>, String> {
|
||||
Ok(vec![])
|
||||
}
|
||||
22
src-tauri/src/crdt/log.rs
Normal file
22
src-tauri/src/crdt/log.rs
Normal file
@ -0,0 +1,22 @@
|
||||
// src/entities/crdt_log.rs
|
||||
use sea_orm::entity::prelude::*;
|
||||
|
||||
#[sea_orm(table_name = "crdt_log")]
|
||||
pub struct Model {
|
||||
#[sea_orm(primary_key, auto_increment = true)]
|
||||
pub id: i64,
|
||||
pub hlc_timestamp: String,
|
||||
pub op_type: String,
|
||||
pub table_name: String,
|
||||
pub row_pk: String, // Wird als JSON-String gespeichert
|
||||
#[sea_orm(nullable)]
|
||||
pub column_name: Option<String>,
|
||||
#[sea_orm(nullable)]
|
||||
pub value: Option<String>,
|
||||
#[sea_orm(nullable)]
|
||||
pub old_value: Option<String>,
|
||||
}
|
||||
|
||||
pub enum Relation {}
|
||||
|
||||
// Use SeaORM's default save/delete hooks; no custom behavior is needed.
impl ActiveModelBehavior for ActiveModel {}
|
||||
165
src-tauri/src/crdt/proxy.rs
Normal file
165
src-tauri/src/crdt/proxy.rs
Normal file
@ -0,0 +1,165 @@
|
||||
// In src-tauri/src/sql_proxy.rs
|
||||
|
||||
use rusqlite::Connection;
use sqlparser::ast::{ColumnDef, DataType, Expr, Ident, Query, Statement, TableWithJoins, Value};
use sqlparser::dialect::SQLiteDialect;
use sqlparser::parser::Parser;
use sqlparser::visit_mut::{self, VisitorMut};
use std::ops::ControlFlow;
|
||||
|
||||
// The tombstone column name as a constant, to avoid magic strings.
pub const TOMBSTONE_COLUMN_NAME: &str = "tombstone";
// Tables that must never be rewritten or audited (the CRDT log itself).
const EXCLUDED_TABLES: &[&str] = &["crdt_log"];

// The main structure of our proxy.
// It is stateless, since we decided against a schema cache.
pub struct SqlProxy;
|
||||
|
||||
impl SqlProxy {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
|
||||
// Die zentrale Ausführungsfunktion
|
||||
pub fn execute(&self, sql: &str, conn: &Connection) -> Result<(), String> {
|
||||
// 1. Parsen des SQL-Strings in einen oder mehrere ASTs.
|
||||
// Ein String kann mehrere, durch Semikolon getrennte Anweisungen enthalten.
|
||||
let dialect = SQLiteDialect {};
|
||||
let mut ast_vec =
|
||||
Parser::parse_sql(&dialect, sql).map_err(|e| format!("SQL-Parse-Fehler: {}", e))?;
|
||||
|
||||
// 2. Wir durchlaufen und transformieren jedes einzelne Statement im AST-Vektor.
|
||||
for statement in &mut ast_vec {
|
||||
self.transform_statement(statement)?;
|
||||
}
|
||||
|
||||
// 3. Ausführen der (möglicherweise modifizierten) Anweisungen in einer einzigen Transaktion.
|
||||
// Dies stellt sicher, dass alle Operationen atomar sind.
|
||||
let tx = conn.transaction().map_err(|e| e.to_string())?;
|
||||
for statement in ast_vec {
|
||||
let final_sql = statement.to_string();
|
||||
tx.execute(&final_sql)
|
||||
.map_err(|e| format!("DB-Ausführungsfehler bei '{}': {}", final_sql, e))?;
|
||||
|
||||
// Wenn es ein CREATE/ALTER TABLE war, müssen die Trigger neu erstellt werden.
|
||||
// Dies geschieht innerhalb derselben Transaktion.
|
||||
if let Statement::CreateTable { name, .. } | Statement::AlterTable { name, .. } =
|
||||
statement
|
||||
{
|
||||
let table_name = name.0.last().unwrap().value.clone();
|
||||
let trigger_manager = crate::trigger_manager::TriggerManager::new(&tx);
|
||||
trigger_manager
|
||||
.setup_triggers_for_table(&table_name)
|
||||
.map_err(|e| {
|
||||
format!("Trigger-Setup-Fehler für Tabelle '{}': {}", table_name, e)
|
||||
})?;
|
||||
}
|
||||
}
|
||||
tx.commit().map_err(|e| e.to_string())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Diese Methode wendet die Transformation auf ein einzelnes Statement an.
|
||||
fn transform_statement(&self, statement: &mut Statement) -> Result<(), String> {
|
||||
let mut visitor = TombstoneVisitor;
|
||||
// `visit` durchläuft den AST und ruft die entsprechenden `visit_*_mut` Methoden auf.
|
||||
statement.visit(&mut visitor);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct TombstoneVisitor;
|
||||
|
||||
impl TombstoneVisitor {
|
||||
fn is_audited_table(&self, table_name: &str) -> bool {
|
||||
!EXCLUDED_TABLES.contains(&table_name.to_lowercase().as_str())
|
||||
}
|
||||
}
|
||||
|
||||
// Rewrites statements so soft-deleted rows stay invisible: CREATE TABLE gains
// a tombstone column, DELETE becomes an UPDATE, and queries get filtered on
// tombstone = 0.
impl VisitorMut for TombstoneVisitor {
    type Break = ();

    // Called for every statement in the AST.
    fn visit_statement_mut(&mut self, stmt: &mut Statement) -> ControlFlow<Self::Break> {
        match stmt {
            // Case 1: CREATE TABLE — append the tombstone column if missing.
            Statement::CreateTable { name, columns, .. } => {
                let table_name = name.0.last().unwrap().value.as_str();
                if self.is_audited_table(table_name) {
                    // Add the 'tombstone' column if it does not exist yet.
                    if !columns
                        .iter()
                        .any(|c| c.name.value.to_lowercase() == TOMBSTONE_COLUMN_NAME)
                    {
                        columns.push(ColumnDef {
                            name: Ident::new(TOMBSTONE_COLUMN_NAME),
                            // NOTE(review): recent sqlparser versions spell
                            // this `DataType::Integer(None)` — confirm against
                            // the pinned sqlparser version.
                            data_type: DataType::Integer,
                            collation: None,
                            options: vec![], // default is 0
                        });
                    }
                }
            }

            // Case 2: DELETE — rewrite the statement into a tombstone UPDATE.
            Statement::Delete(del_stmt) => {
                // NOTE(review): `assignments` is empty and `value` is not a
                // field of `Statement::Update`; the intent is presumably
                // `SET tombstone = 1`, which needs an `Assignment` entry
                // instead — confirm against the sqlparser AST and fix.
                let new_update = Statement::Update {
                    table: del_stmt.from.clone(),
                    assignments: vec![],
                    value: Box::new(Expr::Value(Value::Number("1".to_string(), false))),
                    from: None,
                    selection: del_stmt.selection.clone(),
                    returning: None,
                };
                // Replace the current statement in the AST.
                *stmt = new_update;
            }
            _ => {}
        }

        // Continue traversal into nested nodes (e.g. SELECTs).
        visit_mut::walk_statement_mut(self, stmt)
    }

    // Called for every query, including subqueries.
    fn visit_query_mut(&mut self, query: &mut Query) -> ControlFlow<Self::Break> {
        // Recurse first so inner queries are rewritten before the outer one.
        visit_mut::walk_query_mut(self, query);

        // Then adjust the WHERE clause of the current query.
        if let Some(from_clause) = query.body.as_select_mut().map(|s| &mut s.from) {
            // (A fuller implementation would analyse joins and all tables.)
            // Simplified: only the first table in FROM is considered.
            let table_name = if let Some(relation) = from_clause.get_mut(0) {
                // This logic still needs refinement to handle aliases etc.
                relation.relation.to_string()
            } else {
                "".to_string()
            };

            if self.is_audited_table(&table_name) {
                let tombstone_check = Expr::BinaryOp {
                    left: Box::new(Expr::Identifier(Ident::new(TOMBSTONE_COLUMN_NAME))),
                    op: sqlparser::ast::BinaryOperator::Eq,
                    right: Box::new(Expr::Value(Value::Number("0".to_string(), false))),
                };

                // NOTE(review): in sqlparser the WHERE clause (`selection`)
                // lives on `Select`, not on `Query`; `query.selection` likely
                // needs to target the select obtained above — confirm this
                // compiles against the pinned sqlparser version.
                let existing_selection = query.selection.take();
                let new_selection = match existing_selection {
                    Some(expr) => Expr::BinaryOp {
                        left: Box::new(expr),
                        op: sqlparser::ast::BinaryOperator::And,
                        right: Box::new(tombstone_check),
                    },
                    None => tombstone_check,
                };
                query.selection = Some(Box::new(new_selection));
            }
        }

        ControlFlow::Continue(())
    }
}
|
||||
124
src-tauri/src/crdt/trigger.rs
Normal file
124
src-tauri/src/crdt/trigger.rs
Normal file
@ -0,0 +1,124 @@
|
||||
// In src-tauri/src/trigger_manager.rs -> impl<'a> TriggerManager<'a>
|
||||
|
||||
// In einem neuen Modul, z.B. src-tauri/src/trigger_manager.rs
|
||||
use crate::sql_proxy::ColumnInfo;
|
||||
use rusqlite::{Result, Transaction};
|
||||
|
||||
/// Generates the CRDT audit triggers for a table. It borrows the caller's
/// transaction so all trigger DDL commits or rolls back together with the
/// schema change that prompted it.
pub struct TriggerManager<'a> {
    // The transaction every generated DDL statement is executed in.
    tx: &'a Transaction<'a>,
}
|
||||
|
||||
impl<'a> TriggerManager<'a> {
|
||||
pub fn new(tx: &'a Transaction<'a>) -> Self {
|
||||
Self { tx }
|
||||
}
|
||||
|
||||
// Die Hauptfunktion, die alles einrichtet
|
||||
pub fn setup_triggers_for_table(&self, table_name: &str) -> Result<()> {
|
||||
let columns = self.get_table_schema(table_name)?;
|
||||
let pk_cols: Vec<_> = columns
|
||||
.iter()
|
||||
.filter(|c| c.is_pk)
|
||||
.map(|c| c.name.as_str())
|
||||
.collect();
|
||||
let other_cols: Vec<_> = columns
|
||||
.iter()
|
||||
.filter(|c| !c.is_pk && c.name != "tombstone")
|
||||
.map(|c| c.name.as_str())
|
||||
.collect();
|
||||
|
||||
let drop_sql = self.generate_drop_triggers_sql(table_name);
|
||||
let insert_sql = self.generate_insert_trigger_sql(table_name, &pk_cols, &other_cols);
|
||||
let update_sql = self.generate_update_trigger_sql(table_name, &pk_cols, &other_cols);
|
||||
|
||||
self.tx.execute_batch(&drop_sql)?;
|
||||
self.tx.execute_batch(&insert_sql)?;
|
||||
self.tx.execute_batch(&update_sql)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_table_schema(&self, table_name: &str) -> Result<Vec<ColumnInfo>> {
|
||||
let sql = format!("PRAGMA table_info('{}')", table_name);
|
||||
let mut stmt = self.tx.prepare(&sql)?;
|
||||
let rows = stmt.query_map(|row| {
|
||||
let pk_val: i64 = row.get(5)?;
|
||||
Ok(ColumnInfo {
|
||||
name: row.get(1)?,
|
||||
is_pk: pk_val > 0,
|
||||
})
|
||||
})?;
|
||||
|
||||
let mut columns = Vec::new();
|
||||
for row_result in rows {
|
||||
columns.push(row_result?);
|
||||
}
|
||||
Ok(columns)
|
||||
}
|
||||
|
||||
//... Implementierung der SQL-Generierungsfunktionen...
|
||||
|
||||
fn generate_update_trigger_sql(&self, table_name: &str, pks: &[&str], cols: &[&str]) -> String {
|
||||
// Erstellt dynamisch die Key-Value-Paare für das JSON-Objekt des Primärschlüssels.
|
||||
let pk_json_payload_new = pks
|
||||
.iter()
|
||||
.map(|pk| format!("'{}', NEW.\"{}\"", pk, pk))
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
|
||||
let pk_json_payload_old = pks
|
||||
.iter()
|
||||
.map(|pk| format!("'{}', OLD.\"{}\"", pk, pk))
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
|
||||
// Erstellt die einzelnen INSERT-Anweisungen für jede Spalte
|
||||
let column_updates = cols.iter().map(|col| format!(
|
||||
r#"
|
||||
-- Protokolliere die Spaltenänderung, wenn sie stattgefunden hat und es kein Soft-Delete ist
|
||||
INSERT INTO crdt_log (hlc_timestamp, op_type, table_name, row_pk, column_name, value, old_value)
|
||||
SELECT
|
||||
'placeholder_hlc', -- TODO: HLC-Funktion hier aufrufen
|
||||
'UPDATE',
|
||||
'{table}',
|
||||
json_object({pk_payload_new}),
|
||||
'{column}',
|
||||
json_object('value', NEW."{column}"),
|
||||
json_object('value', OLD."{column}")
|
||||
WHERE
|
||||
NEW."{column}" IS NOT OLD."{column}"
|
||||
"#,
|
||||
table = table_name,
|
||||
pk_payload_new = pk_json_payload_new,
|
||||
column = col
|
||||
)).collect::<Vec<_>>().join("\n");
|
||||
|
||||
// Erstellt die Logik für den Soft-Delete
|
||||
let delete_logic = format!(
|
||||
r#"
|
||||
-- Protokolliere den Soft-Delete
|
||||
INSERT INTO crdt_log (hlc_timestamp, op_type, table_name, row_pk)
|
||||
SELECT
|
||||
'placeholder_hlc', -- TODO: HLC-Funktion hier aufrufen
|
||||
'DELETE',
|
||||
'{table}',
|
||||
json_object({pk_payload_old})
|
||||
WHERE
|
||||
OLD.{tombstone_col} = 0
|
||||
"#,
|
||||
table = table_name,
|
||||
pk_payload_old = pk_json_payload_old
|
||||
);
|
||||
|
||||
// Kombiniert alles zu einem einzigen Trigger
|
||||
format!(
|
||||
"CREATE TRIGGER IF NOT EXISTS {table_name}_crdt_update
|
||||
AFTER UPDATE ON {table_name}
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
{column_updates}
|
||||
{delete_logic}
|
||||
END;"
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -6,8 +6,8 @@ use rusqlite::{
|
||||
Connection, OpenFlags, ToSql,
|
||||
};
|
||||
use serde_json::Value as JsonValue;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::{fs, path::PathBuf};
|
||||
use tauri::State;
|
||||
// --- Hilfsfunktion: Konvertiert JSON Value zu etwas, das rusqlite versteht ---
|
||||
// Diese Funktion ist etwas knifflig wegen Ownership und Lifetimes.
|
||||
@ -168,7 +168,13 @@ pub fn open_and_init_db(path: &str, key: &str, create: bool) -> Result<Connectio
|
||||
OpenFlags::SQLITE_OPEN_READ_WRITE
|
||||
};
|
||||
|
||||
let conn = Connection::open_with_flags(path, flags).map_err(|e| e.to_string())?;
|
||||
let conn = Connection::open_with_flags(path, flags).map_err(|e| {
|
||||
format!(
|
||||
"Dateiii gibt es nicht: {}. Habe nach {} gesucht",
|
||||
e.to_string(),
|
||||
path
|
||||
)
|
||||
})?;
|
||||
conn.pragma_update(None, "key", key)
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
|
||||
@ -3,6 +3,7 @@ pub mod core;
|
||||
|
||||
use rusqlite::Connection;
|
||||
use serde_json::Value as JsonValue;
|
||||
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str::FromStr;
|
||||
@ -58,9 +59,14 @@ pub fn create_encrypted_database(
|
||||
app_handle.path().resource_dir()
|
||||
);
|
||||
|
||||
/* let resource_path = app_handle
|
||||
.path()
|
||||
.resolve("database/vault.db", BaseDirectory::Resource)
|
||||
.map_err(|e| format!("Fehler beim Auflösen des Ressourcenpfads: {}", e))?; */
|
||||
|
||||
let resource_path = app_handle
|
||||
.path()
|
||||
.resolve("database/vault.db", BaseDirectory::Resource)
|
||||
.resolve("temp_vault.db", BaseDirectory::AppLocalData)
|
||||
.map_err(|e| format!("Fehler beim Auflösen des Ressourcenpfads: {}", e))?;
|
||||
|
||||
// Prüfen, ob die Ressourcendatei existiert
|
||||
@ -72,12 +78,16 @@ pub fn create_encrypted_database(
|
||||
}
|
||||
|
||||
// Sicherstellen, dass das Zielverzeichnis existiert
|
||||
if let Some(parent) = Path::new(&path).parent() {
|
||||
/* if let Some(parent) = Path::new(&path).parent() {
|
||||
if !parent.exists() {
|
||||
std::fs::create_dir_all(parent)
|
||||
.map_err(|e| format!("Fehler beim Erstellen des Zielverzeichnisses: {}", e))?;
|
||||
std::fs::create_dir_all(parent).map_err(|e| {
|
||||
format!(
|
||||
"Fehler beim Erstellen des Zielverzeichnisses: {}\n mit Fehler {}",
|
||||
path, e
|
||||
)
|
||||
})?;
|
||||
}
|
||||
}
|
||||
} */
|
||||
|
||||
let target = Path::new(&path);
|
||||
if target.exists() & target.is_file() {
|
||||
@ -167,14 +177,23 @@ pub fn create_encrypted_database(
|
||||
Ok(format!("Verschlüsselte CRDT-Datenbank erstellt",))
|
||||
}
|
||||
|
||||
use tauri_plugin_dialog::{Dialog, DialogExt, MessageDialogKind};
|
||||
#[tauri::command]
|
||||
pub fn open_encrypted_database(
|
||||
app_handle: AppHandle,
|
||||
path: String,
|
||||
key: String,
|
||||
state: State<'_, DbConnection>,
|
||||
) -> Result<String, String> {
|
||||
/* let vault_path = app_handle
|
||||
.path()
|
||||
.resolve(format!("vaults/{}", path), BaseDirectory::AppLocalData)
|
||||
.map_err(|e| format!("Fehler {}", e))?
|
||||
.into_os_string()
|
||||
.into_string()
|
||||
.unwrap(); */
|
||||
if !std::path::Path::new(&path).exists() {
|
||||
return Err("File not found ".into());
|
||||
return Err(format!("File not found {}", path).into());
|
||||
}
|
||||
|
||||
let conn =
|
||||
|
||||
@ -1,63 +0,0 @@
|
||||
// src-tauri/src/sql_proxy.rs
|
||||
|
||||
use rusqlite::Connection;
|
||||
use sqlparser::ast::Statement;
|
||||
use sqlparser::dialect::SQLiteDialect;
|
||||
use sqlparser::parser::Parser;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
// Der Schema-Cache wird später benötigt, um zu wissen, welche Tabellen eine 'tombstone'-Spalte haben.
|
||||
// Für den Anfang lassen wir ihn leer.
|
||||
pub struct SchemaCache {
|
||||
// TODO: z.B. HashMap<String, Vec<String>> für Tabellen und ihre Spalten
|
||||
}
|
||||
|
||||
impl SchemaCache {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
// TODO: Methoden zum Befüllen und Abfragen des Caches
|
||||
}
|
||||
|
||||
// Die Hauptstruktur unseres Proxys
|
||||
pub struct SqlProxy {
|
||||
// Wir benötigen eine threadsichere Referenz auf den Schema-Cache
|
||||
schema_cache: Arc<Mutex<SchemaCache>>,
|
||||
}
|
||||
|
||||
impl SqlProxy {
|
||||
pub fn new(schema_cache: Arc<Mutex<SchemaCache>>) -> Self {
|
||||
Self { schema_cache }
|
||||
}
|
||||
|
||||
// Die zentrale Ausführungsfunktion
|
||||
pub fn execute(&self, sql: &str, conn: &Connection) -> Result<(), String> {
|
||||
// 1. Parsen des SQL-Strings in einen AST
|
||||
let dialect = SQLiteDialect {};
|
||||
let mut ast =
|
||||
Parser::parse_sql(&dialect, sql).map_err(|e| format!("SQL-Parse-Fehler: {}", e))?;
|
||||
|
||||
// Sicherstellen, dass wir nur eine Anweisung haben
|
||||
if ast.len() != 1 {
|
||||
return Err("Nur einzelne SQL-Anweisungen werden unterstützt.".to_string());
|
||||
}
|
||||
let statement = &mut ast;
|
||||
|
||||
// 2. Umschreiben des AST (Logik folgt in Abschnitt 2)
|
||||
self.transform_statement(statement)?;
|
||||
|
||||
// 3. Ausführen der (möglicherweise modifizierten) Anweisung
|
||||
let final_sql = statement.to_string();
|
||||
conn.execute(&final_sql)
|
||||
.map_err(|e| format!("DB-Ausführungsfehler: {}", e))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Platzhalter für die Transformationslogik
|
||||
fn transform_statement(&self, statement: &mut Statement) -> Result<(), String> {
|
||||
// HIER KOMMT DIE MAGIE HIN
|
||||
// TODO: Implementierung der `CREATE TABLE`, `DELETE` und `SELECT` Transformationen
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@ -1,4 +1,5 @@
|
||||
//mod browser;
|
||||
mod android_storage;
|
||||
mod database;
|
||||
mod extension;
|
||||
mod models;
|
||||
@ -44,13 +45,15 @@ pub fn run() {
|
||||
})
|
||||
.manage(DbConnection(Arc::new(Mutex::new(None))))
|
||||
.manage(ExtensionState::default())
|
||||
.plugin(tauri_plugin_notification::init())
|
||||
.plugin(tauri_plugin_dialog::init())
|
||||
.plugin(tauri_plugin_fs::init())
|
||||
.plugin(tauri_plugin_http::init())
|
||||
.plugin(tauri_plugin_notification::init())
|
||||
.plugin(tauri_plugin_opener::init())
|
||||
.plugin(tauri_plugin_os::init())
|
||||
.plugin(tauri_plugin_persisted_scope::init())
|
||||
.plugin(tauri_plugin_store::Builder::new().build())
|
||||
.plugin(tauri_plugin_android_fs::init())
|
||||
//.plugin(tauri_plugin_sql::Builder::new().build())
|
||||
.invoke_handler(tauri::generate_handler![
|
||||
database::create_encrypted_database,
|
||||
@ -63,6 +66,9 @@ pub fn run() {
|
||||
extension::copy_directory,
|
||||
extension::database::extension_sql_execute,
|
||||
extension::database::extension_sql_select,
|
||||
android_storage::request_storage_permission,
|
||||
android_storage::has_storage_permission,
|
||||
android_storage::get_external_storage_paths,
|
||||
])
|
||||
.run(tauri::generate_context!())
|
||||
.expect("error while running tauri application");
|
||||
|
||||
Reference in New Issue
Block a user