no more soft delete => we do it hard now

2025-10-23 09:26:36 +02:00
parent 3d020e7dcf
commit 922ae539ba
17 changed files with 2430 additions and 826 deletions

View File

@@ -0,0 +1 @@
CREATE UNIQUE INDEX `haex_workspaces_position_unique` ON `haex_workspaces` (`position`);
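Side note on this migration: with position UNIQUE, two workspaces can never share a position, not even transiently across statements. A minimal sketch (hypothetical data) of a collision-free swap that parks one row on a temporary position first:

-- swap the workspaces at positions 0 and 1 without tripping the unique index
UPDATE haex_workspaces SET position = -1 WHERE position = 0;
UPDATE haex_workspaces SET position = 0 WHERE position = 1;
UPDATE haex_workspaces SET position = 1 WHERE position = -1;

The commented-out renumbering loop in the workspace store further down has to respect the same constraint.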

View File

@@ -0,0 +1,92 @@
PRAGMA foreign_keys=OFF;--> statement-breakpoint
CREATE TABLE `__new_haex_desktop_items` (
`id` text PRIMARY KEY NOT NULL,
`workspace_id` text NOT NULL,
`item_type` text NOT NULL,
`reference_id` text NOT NULL,
`position_x` integer DEFAULT 0 NOT NULL,
`position_y` integer DEFAULT 0 NOT NULL,
`haex_tombstone` integer,
`haex_timestamp` text,
FOREIGN KEY (`workspace_id`) REFERENCES `haex_workspaces`(`id`) ON UPDATE no action ON DELETE cascade
);
--> statement-breakpoint
INSERT INTO `__new_haex_desktop_items`("id", "workspace_id", "item_type", "reference_id", "position_x", "position_y", "haex_tombstone", "haex_timestamp") SELECT "id", "workspace_id", "item_type", "reference_id", "position_x", "position_y", "haex_tombstone", "haex_timestamp" FROM `haex_desktop_items`;--> statement-breakpoint
DROP TABLE `haex_desktop_items`;--> statement-breakpoint
ALTER TABLE `__new_haex_desktop_items` RENAME TO `haex_desktop_items`;--> statement-breakpoint
PRAGMA foreign_keys=ON;--> statement-breakpoint
CREATE TABLE `__new_haex_extension_permissions` (
`id` text PRIMARY KEY NOT NULL,
`extension_id` text NOT NULL,
`resource_type` text,
`action` text,
`target` text,
`constraints` text,
`status` text DEFAULT 'denied' NOT NULL,
`created_at` text DEFAULT (CURRENT_TIMESTAMP),
`updated_at` integer,
`haex_tombstone` integer,
`haex_timestamp` text,
FOREIGN KEY (`extension_id`) REFERENCES `haex_extensions`(`id`) ON UPDATE no action ON DELETE cascade
);
--> statement-breakpoint
INSERT INTO `__new_haex_extension_permissions`("id", "extension_id", "resource_type", "action", "target", "constraints", "status", "created_at", "updated_at", "haex_tombstone", "haex_timestamp") SELECT "id", "extension_id", "resource_type", "action", "target", "constraints", "status", "created_at", "updated_at", "haex_tombstone", "haex_timestamp" FROM `haex_extension_permissions`;--> statement-breakpoint
DROP TABLE `haex_extension_permissions`;--> statement-breakpoint
ALTER TABLE `__new_haex_extension_permissions` RENAME TO `haex_extension_permissions`;--> statement-breakpoint
CREATE UNIQUE INDEX `haex_extension_permissions_extension_id_resource_type_action_target_unique` ON `haex_extension_permissions` (`extension_id`,`resource_type`,`action`,`target`);--> statement-breakpoint
CREATE TABLE `__new_haex_passwords_group_items` (
`group_id` text NOT NULL,
`item_id` text NOT NULL,
`haex_tombstone` integer,
PRIMARY KEY(`item_id`, `group_id`),
FOREIGN KEY (`group_id`) REFERENCES `haex_passwords_groups`(`id`) ON UPDATE no action ON DELETE cascade,
FOREIGN KEY (`item_id`) REFERENCES `haex_passwords_item_details`(`id`) ON UPDATE no action ON DELETE cascade
);
--> statement-breakpoint
INSERT INTO `__new_haex_passwords_group_items`("group_id", "item_id", "haex_tombstone") SELECT "group_id", "item_id", "haex_tombstone" FROM `haex_passwords_group_items`;--> statement-breakpoint
DROP TABLE `haex_passwords_group_items`;--> statement-breakpoint
ALTER TABLE `__new_haex_passwords_group_items` RENAME TO `haex_passwords_group_items`;--> statement-breakpoint
CREATE TABLE `__new_haex_passwords_groups` (
`id` text PRIMARY KEY NOT NULL,
`name` text,
`description` text,
`icon` text,
`order` integer,
`color` text,
`parent_id` text,
`created_at` text DEFAULT (CURRENT_TIMESTAMP),
`updated_at` integer,
`haex_tombstone` integer,
FOREIGN KEY (`parent_id`) REFERENCES `haex_passwords_groups`(`id`) ON UPDATE no action ON DELETE cascade
);
--> statement-breakpoint
INSERT INTO `__new_haex_passwords_groups`("id", "name", "description", "icon", "order", "color", "parent_id", "created_at", "updated_at", "haex_tombstone") SELECT "id", "name", "description", "icon", "order", "color", "parent_id", "created_at", "updated_at", "haex_tombstone" FROM `haex_passwords_groups`;--> statement-breakpoint
DROP TABLE `haex_passwords_groups`;--> statement-breakpoint
ALTER TABLE `__new_haex_passwords_groups` RENAME TO `haex_passwords_groups`;--> statement-breakpoint
CREATE TABLE `__new_haex_passwords_item_history` (
`id` text PRIMARY KEY NOT NULL,
`item_id` text NOT NULL,
`changed_property` text,
`old_value` text,
`new_value` text,
`created_at` text DEFAULT (CURRENT_TIMESTAMP),
`haex_tombstone` integer,
FOREIGN KEY (`item_id`) REFERENCES `haex_passwords_item_details`(`id`) ON UPDATE no action ON DELETE cascade
);
--> statement-breakpoint
INSERT INTO `__new_haex_passwords_item_history`("id", "item_id", "changed_property", "old_value", "new_value", "created_at", "haex_tombstone") SELECT "id", "item_id", "changed_property", "old_value", "new_value", "created_at", "haex_tombstone" FROM `haex_passwords_item_history`;--> statement-breakpoint
DROP TABLE `haex_passwords_item_history`;--> statement-breakpoint
ALTER TABLE `__new_haex_passwords_item_history` RENAME TO `haex_passwords_item_history`;--> statement-breakpoint
CREATE TABLE `__new_haex_passwords_item_key_values` (
`id` text PRIMARY KEY NOT NULL,
`item_id` text NOT NULL,
`key` text,
`value` text,
`updated_at` integer,
`haex_tombstone` integer,
FOREIGN KEY (`item_id`) REFERENCES `haex_passwords_item_details`(`id`) ON UPDATE no action ON DELETE cascade
);
--> statement-breakpoint
INSERT INTO `__new_haex_passwords_item_key_values`("id", "item_id", "key", "value", "updated_at", "haex_tombstone") SELECT "id", "item_id", "key", "value", "updated_at", "haex_tombstone" FROM `haex_passwords_item_key_values`;--> statement-breakpoint
DROP TABLE `haex_passwords_item_key_values`;--> statement-breakpoint
ALTER TABLE `__new_haex_passwords_item_key_values` RENAME TO `haex_passwords_item_key_values`;
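SQLite cannot change a foreign key's ON DELETE action in place, hence the pattern above: each table is recreated as a __new_ copy carrying the cascade FK, rows are copied over, the original is dropped, and the copy is renamed, all while PRAGMA foreign_keys is OFF. Once foreign keys are back on, a hard delete fans out to the children; a small sketch with a hypothetical id:

DELETE FROM haex_workspaces WHERE id = 'ws-1';
-- the cascade removes the dependent rows as well
SELECT count(*) FROM haex_desktop_items WHERE workspace_id = 'ws-1'; -- 0

Note that cascades only fire on connections that set PRAGMA foreign_keys=ON.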

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -57,6 +57,20 @@
"when": 1761141111765,
"tag": "0007_stale_longshot",
"breakpoints": true
},
{
"idx": 8,
"version": "6",
"when": 1761145177028,
"tag": "0008_dizzy_blue_shield",
"breakpoints": true
},
{
"idx": 9,
"version": "6",
"when": 1761203548348,
"tag": "0009_boring_arclight",
"breakpoints": true
}
]
}

View File

@@ -77,7 +77,11 @@ export const haexExtensionPermissions = sqliteTable(
.$defaultFn(() => crypto.randomUUID()),
extensionId: text(
tableNames.haex.extension_permissions.columns.extensionId,
).references((): AnySQLiteColumn => haexExtensions.id),
)
.notNull()
.references((): AnySQLiteColumn => haexExtensions.id, {
onDelete: 'cascade',
}),
resourceType: text('resource_type', {
enum: ['fs', 'http', 'db', 'shell'],
}),
@@ -152,6 +156,7 @@ export const haexWorkspaces = sqliteTable(
},
tableNames.haex.workspaces.columns,
),
(table) => [unique().on(table.position)],
)
export type InsertHaexWorkspaces = typeof haexWorkspaces.$inferInsert
export type SelectHaexWorkspaces = typeof haexWorkspaces.$inferSelect
@@ -165,7 +170,7 @@ export const haexDesktopItems = sqliteTable(
.$defaultFn(() => crypto.randomUUID()),
workspaceId: text(tableNames.haex.desktop_items.columns.workspaceId)
.notNull()
.references(() => haexWorkspaces.id),
.references(() => haexWorkspaces.id, { onDelete: 'cascade' }),
itemType: text(tableNames.haex.desktop_items.columns.itemType, {
enum: ['extension', 'file', 'folder'],
}).notNull(),

View File

@@ -35,9 +35,11 @@ export const haexPasswordsItemKeyValues = sqliteTable(
tableNames.haex.passwords.item_key_values,
{
id: text().primaryKey(),
itemId: text('item_id').references(
(): AnySQLiteColumn => haexPasswordsItemDetails.id,
),
itemId: text('item_id')
.notNull()
.references((): AnySQLiteColumn => haexPasswordsItemDetails.id, {
onDelete: 'cascade',
}),
key: text(),
value: text(),
updateAt: integer('updated_at', { mode: 'timestamp' }).$onUpdate(
@@ -55,9 +57,11 @@ export const haexPasswordsItemHistory = sqliteTable(
tableNames.haex.passwords.item_histories,
{
id: text().primaryKey(),
itemId: text('item_id').references(
(): AnySQLiteColumn => haexPasswordsItemDetails.id,
),
itemId: text('item_id')
.notNull()
.references((): AnySQLiteColumn => haexPasswordsItemDetails.id, {
onDelete: 'cascade',
}),
changedProperty:
text('changed_property').$type<keyof typeof haexPasswordsItemDetails>(),
oldValue: text('old_value'),
@@ -82,6 +86,7 @@ export const haexPasswordsGroups = sqliteTable(
color: text(),
parentId: text('parent_id').references(
(): AnySQLiteColumn => haexPasswordsGroups.id,
{ onDelete: 'cascade' },
),
createdAt: text('created_at').default(sql`(CURRENT_TIMESTAMP)`),
updateAt: integer('updated_at', { mode: 'timestamp' }).$onUpdate(
@@ -96,12 +101,16 @@ export type SelectHaexPasswordsGroups = typeof haexPasswordsGroups.$inferSelect
export const haexPasswordsGroupItems = sqliteTable(
tableNames.haex.passwords.group_items,
{
groupId: text('group_id').references(
(): AnySQLiteColumn => haexPasswordsGroups.id,
),
itemId: text('item_id').references(
(): AnySQLiteColumn => haexPasswordsItemDetails.id,
),
groupId: text('group_id')
.notNull()
.references((): AnySQLiteColumn => haexPasswordsGroups.id, {
onDelete: 'cascade',
}),
itemId: text('item_id')
.notNull()
.references((): AnySQLiteColumn => haexPasswordsItemDetails.id, {
onDelete: 'cascade',
}),
haex_tombstone: integer({ mode: 'boolean' }),
},
(table) => [primaryKey({ columns: [table.itemId, table.groupId] })],
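With every edge of the passwords schema now cascading, a single hard delete clears an item and everything hanging off it; a sketch with a hypothetical id:

DELETE FROM haex_passwords_item_details WHERE id = 'item-1';
-- cascades also remove the matching rows in haex_passwords_item_key_values,
-- haex_passwords_item_history and haex_passwords_group_items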

Binary file not shown.

View File

@@ -3,10 +3,7 @@
use crate::crdt::trigger::{HLC_TIMESTAMP_COLUMN, TOMBSTONE_COLUMN};
use crate::database::error::DatabaseError;
use sqlparser::ast::{
Assignment, AssignmentTarget, BinaryOperator, Expr, Ident, Insert, ObjectNamePart, OnConflict,
OnConflictAction, OnInsert, SelectItem, SetExpr, Value,
};
use sqlparser::ast::{Expr, Ident, Insert, SelectItem, SetExpr, Value};
use uhlc::Timestamp;
/// Helper struct for INSERT transformations
@@ -54,14 +51,12 @@ impl InsertTransformer {
}
}
/// Transforms INSERT statements (adds the HLC timestamp and handles tombstone conflicts)
/// Automatically adds RETURNING for primary keys so the executor knows the actual PKs
/// Transforms INSERT statements (adds the HLC timestamp)
/// Hard delete: no ON CONFLICT needed anymore - deleted entries are really gone
pub fn transform_insert(
&self,
insert_stmt: &mut Insert,
timestamp: &Timestamp,
primary_keys: &[String],
foreign_keys: &[String],
) -> Result<(), DatabaseError> {
// Add both haex_timestamp and haex_tombstone columns if not exists
let hlc_col_index =
@@ -69,85 +64,9 @@ impl InsertTransformer {
let tombstone_col_index =
Self::find_or_add_column(&mut insert_stmt.columns, self.tombstone_column);
// Add RETURNING for all primary keys (if not already present)
// This lets us know the actual PK values after ON CONFLICT
if insert_stmt.returning.is_none() && !primary_keys.is_empty() {
insert_stmt.returning = Some(
primary_keys
.iter()
.map(|pk| SelectItem::UnnamedExpr(Expr::Identifier(Ident::new(pk))))
.collect(),
);
}
// Set ON CONFLICT for UPSERT behavior on tombstone entries
// This makes it possible to reuse deleted entries
if insert_stmt.on.is_none() {
// ON CONFLICT DO UPDATE SET ...
// Update all columns except the CRDT columns when a conflict occurs
// Build UPDATE assignments for all columns except CRDT columns, primary keys, and foreign keys
let mut assignments = Vec::new();
for column in insert_stmt.columns.clone().iter() {
let col_name = &column.value;
// Skip CRDT columns
if col_name == self.hlc_timestamp_column || col_name == self.tombstone_column {
continue;
}
// Skip primary key columns to avoid FOREIGN KEY conflicts
if primary_keys.contains(col_name) {
continue;
}
// Skip foreign key columns to avoid FOREIGN KEY conflicts
// If an FK points to a new ID that does not exist yet, the constraint fails
if foreign_keys.contains(col_name) {
continue;
}
// excluded.column_name references the new values from the INSERT
assignments.push(Assignment {
target: AssignmentTarget::ColumnName(sqlparser::ast::ObjectName(vec![
ObjectNamePart::Identifier(column.clone()),
])),
value: Expr::CompoundIdentifier(vec![Ident::new("excluded"), column.clone()]),
});
}
// Add the HLC timestamp update (using the provided timestamp)
assignments.push(Assignment {
target: AssignmentTarget::ColumnName(sqlparser::ast::ObjectName(vec![
ObjectNamePart::Identifier(Ident::new(self.hlc_timestamp_column)),
])),
value: Expr::Value(Value::SingleQuotedString(timestamp.to_string()).into()),
});
// Set the tombstone to 0 (reactivate the entry)
assignments.push(Assignment {
target: AssignmentTarget::ColumnName(sqlparser::ast::ObjectName(vec![
ObjectNamePart::Identifier(Ident::new(self.tombstone_column)),
])),
value: Expr::Value(Value::Number("0".to_string(), false).into()),
});
// ON CONFLICT only when tombstone = 1 (the entry was deleted)
// Otherwise the INSERT should fail (UNIQUE constraint error)
let tombstone_condition = Expr::BinaryOp {
left: Box::new(Expr::Identifier(Ident::new(self.tombstone_column))),
op: BinaryOperator::Eq,
right: Box::new(Expr::Value(Value::Number("1".to_string(), false).into())),
};
insert_stmt.on = Some(OnInsert::OnConflict(OnConflict {
conflict_target: None, // Applied to all UNIQUE constraints
action: OnConflictAction::DoUpdate(sqlparser::ast::DoUpdate {
assignments,
selection: Some(tombstone_condition),
}),
}));
}
// ON CONFLICT logic removed entirely!
// With hard deletes there are no tombstone entries left to reactivate
// UNIQUE constraint violations are real errors
match insert_stmt.source.as_mut() {
Some(query) => match &mut *query.body {
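For illustration, roughly what the slimmed-down transform_insert now does to a statement; the table, values, and timestamp literal are made up, and the exact placement of the injected values is an assumption from the surrounding code:

-- statement as issued by the application
INSERT INTO haex_workspaces (id, name, position) VALUES ('ws-1', 'Work', 0);

-- roughly what reaches SQLite after transformation
INSERT INTO haex_workspaces (id, name, position, haex_timestamp, haex_tombstone)
VALUES ('ws-1', 'Work', 0, '<hlc timestamp>', 0);

The soft-delete version additionally appended RETURNING id plus an ON CONFLICT DO UPDATE ... WHERE haex_tombstone = 1 branch; a duplicate key is now a plain UNIQUE constraint error.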

View File

@@ -5,8 +5,8 @@ use crate::crdt::trigger::{HLC_TIMESTAMP_COLUMN, TOMBSTONE_COLUMN};
use crate::database::error::DatabaseError;
use crate::table_names::{TABLE_CRDT_CONFIGS, TABLE_CRDT_LOGS};
use sqlparser::ast::{
Assignment, AssignmentTarget, BinaryOperator, ColumnDef, DataType, Expr, Ident, ObjectName,
ObjectNamePart, SelectItem, SetExpr, Statement, TableFactor, TableObject, Value,
Assignment, AssignmentTarget, ColumnDef, DataType, Expr, Ident, ObjectName,
ObjectNamePart, Statement, TableFactor, TableObject, Value,
};
use std::borrow::Cow;
use std::collections::HashSet;
@@ -25,32 +25,6 @@ impl CrdtColumns {
hlc_timestamp: HLC_TIMESTAMP_COLUMN,
};
/// Builds a tombstone filter for a table
fn create_tombstone_filter(&self, table_alias: Option<&str>) -> Expr {
let column_expr = match table_alias {
Some(alias) => {
// Qualified reference: alias.tombstone
Expr::CompoundIdentifier(vec![Ident::new(alias), Ident::new(self.tombstone)])
}
None => {
// Plain reference: tombstone
Expr::Identifier(Ident::new(self.tombstone))
}
};
Expr::IsNotTrue(Box::new(column_expr))
}
/// Builds a tombstone assignment for UPDATE/DELETE
fn create_tombstone_assignment(&self) -> Assignment {
Assignment {
target: AssignmentTarget::ColumnName(ObjectName(vec![ObjectNamePart::Identifier(
Ident::new(self.tombstone),
)])),
value: Expr::Value(Value::Number("1".to_string(), false).into()),
}
}
/// Builds an HLC assignment for UPDATE/DELETE
fn create_hlc_assignment(&self, timestamp: &Timestamp) -> Assignment {
Assignment {
@@ -112,27 +86,11 @@ impl CrdtTransformer {
// =================================================================
// PUBLIC API METHODS
// =================================================================
pub fn transform_select_statement(&self, stmt: &mut Statement) -> Result<(), DatabaseError> {
match stmt {
Statement::Query(query) => {
// Now calls the private method on this struct
self.transform_select_query_recursive(&mut query.body, &self.excluded_tables)
}
// Catch all other cases and return an error
_ => Err(DatabaseError::UnsupportedStatement {
sql: stmt.to_string(),
reason: "This operation only accepts SELECT statements.".to_string(),
}),
}
}
/// Transforms statements WITH access to table information (recommended)
pub fn transform_execute_statement_with_table_info(
&self,
stmt: &mut Statement,
hlc_timestamp: &Timestamp,
tx: &rusqlite::Transaction,
) -> Result<Option<String>, DatabaseError> {
match stmt {
Statement::CreateTable(create_table) => {
@@ -149,37 +107,9 @@ impl CrdtTransformer {
Statement::Insert(insert_stmt) => {
if let TableObject::TableName(name) = &insert_stmt.table {
if self.is_crdt_sync_table(name) {
// Fetch the table information to identify PKs and FKs
let table_name_str = self.normalize_table_name(name);
let columns = crate::crdt::trigger::get_table_schema(tx, &table_name_str)
.map_err(|e| DatabaseError::ExecutionError {
sql: format!("PRAGMA table_info('{}')", table_name_str),
reason: e.to_string(),
table: Some(table_name_str.to_string()),
})?;
let primary_keys: Vec<String> = columns
.iter()
.filter(|c| c.is_pk)
.map(|c| c.name.clone())
.collect();
let foreign_keys =
crate::crdt::trigger::get_foreign_key_columns(tx, &table_name_str)
.map_err(|e| DatabaseError::ExecutionError {
sql: format!("PRAGMA foreign_key_list('{}')", table_name_str),
reason: e.to_string(),
table: Some(table_name_str.to_string()),
})?;
// Hard delete: no schema lookup needed anymore (no ON CONFLICT)
let insert_transformer = InsertTransformer::new();
insert_transformer.transform_insert(
insert_stmt,
hlc_timestamp,
&primary_keys,
&foreign_keys,
)?;
insert_transformer.transform_insert(insert_stmt, hlc_timestamp)?;
}
}
Ok(None)
@@ -194,26 +124,11 @@ impl CrdtTransformer {
}
Ok(None)
}
Statement::Delete(del_stmt) => {
if let Some(table_name) = self.extract_table_name_from_delete(del_stmt) {
let table_name_str = self.normalize_table_name(&table_name);
let is_crdt = self.is_crdt_sync_table(&table_name);
eprintln!("DEBUG DELETE (with_table_info): table='{}', is_crdt_sync={}, normalized='{}'",
table_name, is_crdt, table_name_str);
if is_crdt {
eprintln!(
"DEBUG: Transforming DELETE to UPDATE for table '{}'",
table_name_str
);
self.transform_delete_to_update(stmt, hlc_timestamp)?;
}
Ok(None)
} else {
Err(DatabaseError::UnsupportedStatement {
sql: del_stmt.to_string(),
reason: "DELETE from non-table source or multiple tables".to_string(),
})
}
Statement::Delete(_del_stmt) => {
// Hard delete - no transformation!
// DELETE stays DELETE
// The BEFORE DELETE triggers write the logs
Ok(None)
}
Statement::AlterTable { name, .. } => {
if self.is_crdt_sync_table(name) {
@@ -231,9 +146,6 @@ impl CrdtTransformer {
stmt: &mut Statement,
hlc_timestamp: &Timestamp,
) -> Result<Option<String>, DatabaseError> {
// For INSERT statements without a connection we use an empty PK list
// That means ALL columns are set in the ON CONFLICT UPDATE
// This is a fallback for the case that no connection is available
match stmt {
Statement::CreateTable(create_table) => {
if self.is_crdt_sync_table(&create_table.name) {
@@ -249,14 +161,9 @@ impl CrdtTransformer {
Statement::Insert(insert_stmt) => {
if let TableObject::TableName(name) = &insert_stmt.table {
if self.is_crdt_sync_table(name) {
// Without a connection: empty PK and FK lists (all columns get updated)
// Hard delete: no ON CONFLICT logic needed anymore
let insert_transformer = InsertTransformer::new();
insert_transformer.transform_insert(
insert_stmt,
hlc_timestamp,
&[],
&[],
)?;
insert_transformer.transform_insert(insert_stmt, hlc_timestamp)?;
}
}
Ok(None)
@@ -264,25 +171,17 @@ impl CrdtTransformer {
Statement::Update {
table, assignments, ..
} => {
if let TableFactor::Table { name, .. } = &table.relation {
if let TableFactor::Table { name, ..} = &table.relation {
if self.is_crdt_sync_table(name) {
assignments.push(self.columns.create_hlc_assignment(hlc_timestamp));
}
}
Ok(None)
}
Statement::Delete(del_stmt) => {
if let Some(table_name) = self.extract_table_name_from_delete(del_stmt) {
if self.is_crdt_sync_table(&table_name) {
self.transform_delete_to_update(stmt, hlc_timestamp)?;
}
Ok(None)
} else {
Err(DatabaseError::UnsupportedStatement {
sql: del_stmt.to_string(),
reason: "DELETE from non-table source or multiple tables".to_string(),
})
}
Statement::Delete(_del_stmt) => {
// Hard delete - no transformation!
// DELETE stays DELETE
Ok(None)
}
Statement::AlterTable { name, .. } => {
if self.is_crdt_sync_table(name) {
@@ -294,539 +193,4 @@ impl CrdtTransformer {
_ => Ok(None),
}
}
// =================================================================
// PRIVATE HELPERS (DELETE/UPDATE)
// =================================================================
/// Transforms a DELETE into an UPDATE (soft delete)
fn transform_delete_to_update(
&self,
stmt: &mut Statement,
timestamp: &Timestamp,
) -> Result<(), DatabaseError> {
if let Statement::Delete(del_stmt) = stmt {
let table_to_update = match &del_stmt.from {
sqlparser::ast::FromTable::WithFromKeyword(from)
| sqlparser::ast::FromTable::WithoutKeyword(from) => {
if from.len() == 1 {
from[0].clone()
} else {
return Err(DatabaseError::UnsupportedStatement {
reason: "DELETE with multiple tables not supported".to_string(),
sql: stmt.to_string(),
});
}
}
};
let assignments = vec![
self.columns.create_tombstone_assignment(),
self.columns.create_hlc_assignment(timestamp),
];
*stmt = Statement::Update {
table: table_to_update,
assignments,
from: None,
selection: del_stmt.selection.clone(),
returning: None,
or: None,
limit: None,
};
}
Ok(())
}
/// Extracts the table name from a DELETE statement
fn extract_table_name_from_delete(
&self,
del_stmt: &sqlparser::ast::Delete,
) -> Option<ObjectName> {
let tables = match &del_stmt.from {
sqlparser::ast::FromTable::WithFromKeyword(from)
| sqlparser::ast::FromTable::WithoutKeyword(from) => from,
};
if tables.len() == 1 {
if let TableFactor::Table { name, .. } = &tables[0].relation {
Some(name.clone())
} else {
None
}
} else {
None
}
}
// =================================================================
// PRIVATE HELPERS (SELECT TRANSFORMATION)
// (These methods come from the old `query_transformer.rs`)
// =================================================================
/// Recursively handles all SetExpr types with full subquery support
fn transform_select_query_recursive(
&self,
set_expr: &mut SetExpr,
excluded_tables: &std::collections::HashSet<&str>,
) -> Result<(), DatabaseError> {
match set_expr {
SetExpr::Select(select) => {
self.add_tombstone_filters_to_select(select, excluded_tables)?;
// Also transform subqueries in projections
for projection in &mut select.projection {
match projection {
SelectItem::UnnamedExpr(expr) | SelectItem::ExprWithAlias { expr, .. } => {
self.transform_expression_subqueries(expr, excluded_tables)?;
}
_ => {} // Ignore wildcard projections
}
}
// Transform subqueries in WHERE
if let Some(where_clause) = &mut select.selection {
self.transform_expression_subqueries(where_clause, excluded_tables)?;
}
// Transform subqueries in GROUP BY
match &mut select.group_by {
sqlparser::ast::GroupByExpr::All(_) => {
// GROUP BY ALL - no expressions to transform
}
sqlparser::ast::GroupByExpr::Expressions(exprs, _) => {
for group_expr in exprs {
self.transform_expression_subqueries(group_expr, excluded_tables)?;
}
}
}
// Transform subqueries in HAVING
if let Some(having) = &mut select.having {
self.transform_expression_subqueries(having, excluded_tables)?;
}
}
SetExpr::SetOperation { left, right, .. } => {
self.transform_select_query_recursive(left, excluded_tables)?;
self.transform_select_query_recursive(right, excluded_tables)?;
}
SetExpr::Query(query) => {
self.transform_select_query_recursive(&mut query.body, excluded_tables)?;
}
SetExpr::Values(values) => {
// Also transform subqueries in VALUES lists
for row in &mut values.rows {
for expr in row {
self.transform_expression_subqueries(expr, excluded_tables)?;
}
}
}
_ => {} // Other cases
}
Ok(())
}
/// Transforms subqueries inside expressions
fn transform_expression_subqueries(
&self,
expr: &mut Expr,
excluded_tables: &std::collections::HashSet<&str>,
) -> Result<(), DatabaseError> {
match expr {
// Plain subqueries
Expr::Subquery(query) => {
self.transform_select_query_recursive(&mut query.body, excluded_tables)?;
}
// EXISTS Subqueries
Expr::Exists { subquery, .. } => {
self.transform_select_query_recursive(&mut subquery.body, excluded_tables)?;
}
// IN Subqueries
Expr::InSubquery {
expr: left_expr,
subquery,
..
} => {
self.transform_expression_subqueries(left_expr, excluded_tables)?;
self.transform_select_query_recursive(&mut subquery.body, excluded_tables)?;
}
// ANY/ALL Subqueries
Expr::AnyOp { left, right, .. } | Expr::AllOp { left, right, .. } => {
self.transform_expression_subqueries(left, excluded_tables)?;
self.transform_expression_subqueries(right, excluded_tables)?;
}
// Binary operations
Expr::BinaryOp { left, right, .. } => {
self.transform_expression_subqueries(left, excluded_tables)?;
self.transform_expression_subqueries(right, excluded_tables)?;
}
// Unary operations
Expr::UnaryOp {
expr: inner_expr, ..
} => {
self.transform_expression_subqueries(inner_expr, excluded_tables)?;
}
// Nested expressions
Expr::Nested(nested) => {
self.transform_expression_subqueries(nested, excluded_tables)?;
}
// CASE expressions
Expr::Case {
operand,
conditions,
else_result,
..
} => {
if let Some(op) = operand {
self.transform_expression_subqueries(op, excluded_tables)?;
}
for case_when in conditions {
self.transform_expression_subqueries(
&mut case_when.condition,
excluded_tables,
)?;
self.transform_expression_subqueries(&mut case_when.result, excluded_tables)?;
}
if let Some(else_res) = else_result {
self.transform_expression_subqueries(else_res, excluded_tables)?;
}
}
// Function calls
Expr::Function(func) => match &mut func.args {
sqlparser::ast::FunctionArguments::List(sqlparser::ast::FunctionArgumentList {
args,
..
}) => {
for arg in args {
if let sqlparser::ast::FunctionArg::Unnamed(
sqlparser::ast::FunctionArgExpr::Expr(expr),
) = arg
{
self.transform_expression_subqueries(expr, excluded_tables)?;
}
}
}
_ => {}
},
// BETWEEN
Expr::Between {
expr: main_expr,
low,
high,
..
} => {
self.transform_expression_subqueries(main_expr, excluded_tables)?;
self.transform_expression_subqueries(low, excluded_tables)?;
self.transform_expression_subqueries(high, excluded_tables)?;
}
// IN list
Expr::InList {
expr: main_expr,
list,
..
} => {
self.transform_expression_subqueries(main_expr, excluded_tables)?;
for list_expr in list {
self.transform_expression_subqueries(list_expr, excluded_tables)?;
}
}
// IS NULL/IS NOT NULL
Expr::IsNull(inner) | Expr::IsNotNull(inner) => {
self.transform_expression_subqueries(inner, excluded_tables)?;
}
// Other expression types need no transformation
_ => {}
}
Ok(())
}
/// Adds tombstone filters to SELECT statements
fn add_tombstone_filters_to_select(
&self,
select: &mut sqlparser::ast::Select,
excluded_tables: &HashSet<&str>,
) -> Result<(), DatabaseError> {
// Collect all CRDT tables with their aliases
let mut crdt_tables = Vec::new();
for twj in &select.from {
if let TableFactor::Table { name, alias, .. } = &twj.relation {
// Uses the central logic from CrdtTransformer
if self.is_crdt_sync_table(name) {
let table_alias = alias.as_ref().map(|a| a.name.value.as_str());
crdt_tables.push((name.clone(), table_alias));
}
}
}
if crdt_tables.is_empty() {
return Ok(());
}
// Check which tombstone columns are already referenced in the WHERE clause
let explicitly_filtered_tables = if let Some(where_clause) = &select.selection {
self.find_explicitly_filtered_tombstone_tables(where_clause, &crdt_tables)
} else {
HashSet::new()
};
// Only build filters for tables that are not yet filtered explicitly
let mut tombstone_filters = Vec::new();
for (table_name, table_alias) in crdt_tables {
let table_name_string = table_name.to_string();
let table_key = table_alias.unwrap_or(&table_name_string);
if !explicitly_filtered_tables.contains(table_key) {
// Uses the central logic from CrdtColumns
tombstone_filters.push(self.columns.create_tombstone_filter(table_alias));
}
}
// Add the automatic filters
if !tombstone_filters.is_empty() {
let combined_filter = tombstone_filters
.into_iter()
.reduce(|acc, expr| Expr::BinaryOp {
left: Box::new(acc),
op: BinaryOperator::And,
right: Box::new(expr),
})
.unwrap();
match &mut select.selection {
Some(existing) => {
*existing = Expr::BinaryOp {
left: Box::new(existing.clone()),
op: BinaryOperator::And,
right: Box::new(combined_filter),
};
}
None => {
select.selection = Some(combined_filter);
}
}
}
Ok(())
}
/// Finds all tables that already have explicit tombstone filters in the WHERE clause
fn find_explicitly_filtered_tombstone_tables(
&self,
where_expr: &Expr,
crdt_tables: &[(ObjectName, Option<&str>)],
) -> HashSet<String> {
let mut filtered_tables = HashSet::new();
self.scan_expression_for_tombstone_references(
where_expr,
crdt_tables,
&mut filtered_tables,
);
filtered_tables
}
/// Recursively scans an expression tree for tombstone column references
fn scan_expression_for_tombstone_references(
&self,
expr: &Expr,
crdt_tables: &[(ObjectName, Option<&str>)],
filtered_tables: &mut HashSet<String>,
) {
match expr {
Expr::Identifier(ident) => {
// Uses the central configuration from CrdtColumns
if ident.value == self.columns.tombstone && crdt_tables.len() == 1 {
let table_name_str = crdt_tables[0].0.to_string();
let table_key = crdt_tables[0].1.unwrap_or(&table_name_str);
filtered_tables.insert(table_key.to_string());
}
}
Expr::CompoundIdentifier(idents) => {
// Uses the central configuration from CrdtColumns
if idents.len() == 2 && idents[1].value == self.columns.tombstone {
let table_ref = &idents[0].value;
for (table_name, alias) in crdt_tables {
let table_name_str = table_name.to_string();
if table_ref == &table_name_str || alias.map_or(false, |a| a == table_ref) {
filtered_tables.insert(table_ref.clone());
break;
}
}
}
}
Expr::BinaryOp { left, right, .. } => {
self.scan_expression_for_tombstone_references(left, crdt_tables, filtered_tables);
self.scan_expression_for_tombstone_references(right, crdt_tables, filtered_tables);
}
Expr::UnaryOp { expr, .. } => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
}
Expr::Nested(nested) => {
self.scan_expression_for_tombstone_references(nested, crdt_tables, filtered_tables);
}
Expr::InList { expr, .. } => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
}
Expr::Between { expr, .. } => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
}
Expr::IsNull(expr) | Expr::IsNotNull(expr) => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
}
Expr::Function(func) => {
if let sqlparser::ast::FunctionArguments::List(
sqlparser::ast::FunctionArgumentList { args, .. },
) = &func.args
{
for arg in args {
if let sqlparser::ast::FunctionArg::Unnamed(
sqlparser::ast::FunctionArgExpr::Expr(expr),
) = arg
{
self.scan_expression_for_tombstone_references(
expr,
crdt_tables,
filtered_tables,
);
}
}
}
}
Expr::Case {
operand,
conditions,
else_result,
..
} => {
if let Some(op) = operand {
self.scan_expression_for_tombstone_references(op, crdt_tables, filtered_tables);
}
for case_when in conditions {
self.scan_expression_for_tombstone_references(
&case_when.condition,
crdt_tables,
filtered_tables,
);
self.scan_expression_for_tombstone_references(
&case_when.result,
crdt_tables,
filtered_tables,
);
}
if let Some(else_res) = else_result {
self.scan_expression_for_tombstone_references(
else_res,
crdt_tables,
filtered_tables,
);
}
}
Expr::Subquery(query) => {
self.analyze_query_for_tombstone_references(query, crdt_tables, filtered_tables)
.ok();
}
Expr::Exists { subquery, .. } => {
self.analyze_query_for_tombstone_references(subquery, crdt_tables, filtered_tables)
.ok();
}
Expr::InSubquery { expr, subquery, .. } => {
self.scan_expression_for_tombstone_references(expr, crdt_tables, filtered_tables);
self.analyze_query_for_tombstone_references(subquery, crdt_tables, filtered_tables)
.ok();
}
Expr::AnyOp { left, right, .. } | Expr::AllOp { left, right, .. } => {
self.scan_expression_for_tombstone_references(left, crdt_tables, filtered_tables);
self.scan_expression_for_tombstone_references(right, crdt_tables, filtered_tables);
}
_ => {}
}
}
fn analyze_query_for_tombstone_references(
&self,
query: &sqlparser::ast::Query,
crdt_tables: &[(ObjectName, Option<&str>)],
filtered_tables: &mut HashSet<String>,
) -> Result<(), DatabaseError> {
self.analyze_set_expr_for_tombstone_references(&query.body, crdt_tables, filtered_tables)
}
fn analyze_set_expr_for_tombstone_references(
&self,
set_expr: &SetExpr,
crdt_tables: &[(ObjectName, Option<&str>)],
filtered_tables: &mut HashSet<String>,
) -> Result<(), DatabaseError> {
match set_expr {
SetExpr::Select(select) => {
if let Some(where_clause) = &select.selection {
self.scan_expression_for_tombstone_references(
where_clause,
crdt_tables,
filtered_tables,
);
}
for projection in &select.projection {
match projection {
SelectItem::UnnamedExpr(expr) | SelectItem::ExprWithAlias { expr, .. } => {
self.scan_expression_for_tombstone_references(
expr,
crdt_tables,
filtered_tables,
);
}
_ => {}
}
}
match &select.group_by {
sqlparser::ast::GroupByExpr::All(_) => {}
sqlparser::ast::GroupByExpr::Expressions(exprs, _) => {
for group_expr in exprs {
self.scan_expression_for_tombstone_references(
group_expr,
crdt_tables,
filtered_tables,
);
}
}
}
if let Some(having) = &select.having {
self.scan_expression_for_tombstone_references(
having,
crdt_tables,
filtered_tables,
);
}
}
SetExpr::SetOperation { left, right, .. } => {
self.analyze_set_expr_for_tombstone_references(left, crdt_tables, filtered_tables)?;
self.analyze_set_expr_for_tombstone_references(
right,
crdt_tables,
filtered_tables,
)?;
}
SetExpr::Query(query) => {
self.analyze_set_expr_for_tombstone_references(
&query.body,
crdt_tables,
filtered_tables,
)?;
}
SetExpr::Values(values) => {
for row in &values.rows {
for expr in row {
self.scan_expression_for_tombstone_references(
expr,
crdt_tables,
filtered_tables,
);
}
}
}
_ => {}
}
Ok(())
}
}
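The before/after for DELETE statements, reconstructed from the removed transform_delete_to_update (identifiers and timestamp illustrative):

-- application statement, now passed through unchanged
DELETE FROM haex_passwords_groups WHERE id = 'grp-1';

-- what the soft-delete transformer used to execute instead
UPDATE haex_passwords_groups
SET haex_tombstone = 1, haex_timestamp = '<hlc timestamp>'
WHERE id = 'grp-1';

Logging now happens outside the statement, in the BEFORE DELETE trigger added below.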

View File

@@ -9,6 +9,7 @@ use ts_rs::TS;
// Der "z_"-Präfix soll sicherstellen, dass diese Trigger als Letzte ausgeführt werden
const INSERT_TRIGGER_TPL: &str = "z_crdt_{TABLE_NAME}_insert";
const UPDATE_TRIGGER_TPL: &str = "z_crdt_{TABLE_NAME}_update";
const DELETE_TRIGGER_TPL: &str = "z_crdt_{TABLE_NAME}_delete";
//const SYNC_ACTIVE_KEY: &str = "sync_active";
pub const TOMBSTONE_COLUMN: &str = "haex_tombstone";
@@ -143,6 +144,7 @@ pub fn setup_triggers_for_table(
let insert_trigger_sql = generate_insert_trigger_sql(table_name, &pks, &cols_to_track);
let update_trigger_sql = generate_update_trigger_sql(table_name, &pks, &cols_to_track);
let delete_trigger_sql = generate_delete_trigger_sql(table_name, &pks, &cols_to_track);
if recreate {
drop_triggers_for_table(&tx, table_name)?;
@@ -150,6 +152,7 @@
tx.execute_batch(&insert_trigger_sql)?;
tx.execute_batch(&update_trigger_sql)?;
tx.execute_batch(&delete_trigger_sql)?;
Ok(TriggerSetupResult::Success)
}
@@ -170,28 +173,7 @@ pub fn get_table_schema(conn: &Connection, table_name: &str) -> RusqliteResult<V
rows.collect()
}
/// Fetches all foreign key columns of a table.
/// Returns a list of the column names that are foreign keys.
pub fn get_foreign_key_columns(conn: &Connection, table_name: &str) -> RusqliteResult<Vec<String>> {
if !is_safe_identifier(table_name) {
return Err(rusqlite::Error::InvalidParameterName(format!(
"Invalid or unsafe table name provided: {}",
table_name
))
.into());
}
let sql = format!("PRAGMA foreign_key_list(\"{}\");", table_name);
let mut stmt = conn.prepare(&sql)?;
// foreign_key_list returns the columns: id, seq, table, from, to, on_update, on_delete, match
// We need the "from" column, which holds the name of the FK column in the current table
let rows = stmt.query_map([], |row| {
row.get::<_, String>("from")
})?;
rows.collect()
}
// get_foreign_key_columns() removed - not needed with hard deletes (no ON CONFLICT logic)
pub fn drop_triggers_for_table(
tx: &Transaction, // Operates directly on a transaction
@@ -209,8 +191,13 @@ pub fn drop_triggers_for_table(
drop_trigger_sql(INSERT_TRIGGER_TPL.replace("{TABLE_NAME}", table_name));
let drop_update_trigger_sql =
drop_trigger_sql(UPDATE_TRIGGER_TPL.replace("{TABLE_NAME}", table_name));
let drop_delete_trigger_sql =
drop_trigger_sql(DELETE_TRIGGER_TPL.replace("{TABLE_NAME}", table_name));
let sql_batch = format!("{}\n{}", drop_insert_trigger_sql, drop_update_trigger_sql);
let sql_batch = format!(
"{}\n{}\n{}",
drop_insert_trigger_sql, drop_update_trigger_sql, drop_delete_trigger_sql
);
tx.execute_batch(&sql_batch)?;
Ok(())
@@ -350,19 +337,7 @@ fn generate_update_trigger_sql(table_name: &str, pks: &[String], cols: &[String]
}
}
// Log the soft delete
writeln!(
&mut body,
"INSERT INTO {log_table} (haex_timestamp, op_type, table_name, row_pks)
SELECT NEW.\"{hlc_col}\", 'DELETE', '{table}', json_object({pk_payload})
WHERE NEW.\"{tombstone_col}\" = 1 AND OLD.\"{tombstone_col}\" = 0;",
log_table = TABLE_CRDT_LOGS,
hlc_col = HLC_TIMESTAMP_COLUMN,
table = table_name,
pk_payload = pk_json_payload,
tombstone_col = TOMBSTONE_COLUMN
)
.unwrap();
// Soft-delete logging removed - we now use hard deletes with a dedicated BEFORE DELETE trigger
let trigger_name = UPDATE_TRIGGER_TPL.replace("{TABLE_NAME}", table_name);
@@ -375,3 +350,54 @@ fn generate_update_trigger_sql(table_name: &str, pks: &[String], cols: &[String]
END;"
)
}
/// Generates the SQL for the BEFORE DELETE trigger.
/// IMPORTANT: BEFORE DELETE so the row data is still available!
fn generate_delete_trigger_sql(table_name: &str, pks: &[String], cols: &[String]) -> String {
let pk_json_payload = pks
.iter()
.map(|pk| format!("'{}', OLD.\"{}\"", pk, pk))
.collect::<Vec<_>>()
.join(", ");
let mut body = String::new();
// Store all column values for possible restoration
if !cols.is_empty() {
for col in cols {
writeln!(
&mut body,
"INSERT INTO {log_table} (haex_timestamp, op_type, table_name, row_pks, column_name, old_value)
VALUES (OLD.\"{hlc_col}\", 'DELETE', '{table}', json_object({pk_payload}), '{column}',
json_object('value', OLD.\"{column}\"));",
log_table = TABLE_CRDT_LOGS,
hlc_col = HLC_TIMESTAMP_COLUMN,
table = table_name,
pk_payload = pk_json_payload,
column = col
).unwrap();
}
} else {
// Only PKs -> minimal delete log
writeln!(
&mut body,
"INSERT INTO {log_table} (haex_timestamp, op_type, table_name, row_pks)
VALUES (OLD.\"{hlc_col}\", 'DELETE', '{table}', json_object({pk_payload}));",
log_table = TABLE_CRDT_LOGS,
hlc_col = HLC_TIMESTAMP_COLUMN,
table = table_name,
pk_payload = pk_json_payload
).unwrap();
}
let trigger_name = DELETE_TRIGGER_TPL.replace("{TABLE_NAME}", table_name);
format!(
"CREATE TRIGGER IF NOT EXISTS \"{trigger_name}\"
BEFORE DELETE ON \"{table_name}\"
FOR EACH ROW
BEGIN
{body}
END;"
)
}
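Putting the template together: for a hypothetical CRDT table notes with primary key id and a single tracked column title, and assuming TABLE_CRDT_LOGS resolves to haex_crdt_logs, the generator should emit something like:

CREATE TRIGGER IF NOT EXISTS "z_crdt_notes_delete"
BEFORE DELETE ON "notes"
FOR EACH ROW
BEGIN
INSERT INTO haex_crdt_logs (haex_timestamp, op_type, table_name, row_pks, column_name, old_value)
VALUES (OLD."haex_timestamp", 'DELETE', 'notes', json_object('id', OLD."id"), 'title',
json_object('value', OLD."title"));
END;

Because it fires BEFORE DELETE, OLD still carries the full row, so the log keeps enough data for a potential restore.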

View File

@@ -135,7 +135,6 @@ impl SqlExecutor {
if let Some(table_name) = transformer.transform_execute_statement_with_table_info(
&mut statement,
&hlc_timestamp,
tx,
)? {
modified_schema_tables.insert(table_name);
}
@@ -239,7 +238,6 @@ impl SqlExecutor {
if let Some(table_name) = transformer.transform_execute_statement_with_table_info(
&mut statement,
&hlc_timestamp,
tx,
)? {
modified_schema_tables.insert(table_name);
}
@@ -460,13 +458,12 @@ impl SqlExecutor {
}
let sql_params = ValueConverter::convert_params(params)?;
let transformer = CrdtTransformer::new();
let mut stmt_to_execute = ast_vec.pop().unwrap();
transformer.transform_select_statement(&mut stmt_to_execute)?;
// Hard delete: SELECT transformation no longer needed
let stmt_to_execute = ast_vec.pop().unwrap();
let transformed_sql = stmt_to_execute.to_string();
eprintln!("DEBUG: Transformed SELECT: {}", transformed_sql);
eprintln!("DEBUG: SELECT (no transformation): {}", transformed_sql);
let mut prepared_stmt = conn.prepare(&transformed_sql)?;
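For reference, what the dropped SELECT transformation used to add, reconstructed from the removed create_tombstone_filter (an IS NOT TRUE check on the tombstone column):

-- query as written by the caller, now executed verbatim
SELECT id, name FROM haex_workspaces;

-- what the soft-delete transformer used to run instead
SELECT id, name FROM haex_workspaces WHERE haex_tombstone IS NOT TRUE;

With hard deletes the rows are physically gone, so no implicit filter is needed.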

View File

@@ -232,14 +232,8 @@ pub async fn extension_sql_select(
// Database operation
with_connection(&state.db, |conn| {
let sql_params = ValueConverter::convert_params(&params)?;
let transformer = CrdtTransformer::new();
// Use the last statement for result set
let last_statement = ast_vec.pop().unwrap();
let mut stmt_to_execute = last_statement;
// Transform the statement
transformer.transform_select_statement(&mut stmt_to_execute)?;
// Hard delete: SELECT transformation no longer needed
let stmt_to_execute = ast_vec.pop().unwrap();
let transformed_sql = stmt_to_execute.to_string();
// Prepare and execute query

View File

@@ -563,7 +563,9 @@ const onSwiperInit = (swiper: SwiperType) => {
}
const onSlideChange = (swiper: SwiperType) => {
workspaceStore.switchToWorkspace(swiper.activeIndex)
workspaceStore.switchToWorkspace(
workspaceStore.workspaces.at(swiper.activeIndex)?.id,
)
}
// Workspace control handlers

View File

@@ -6,7 +6,7 @@
? 'ring-2 ring-secondary bg-secondary/10'
: 'hover:ring-2 hover:ring-gray-300',
]"
@click="workspaceStore.slideToWorkspace(workspace.position)"
@click="workspaceStore.slideToWorkspace(workspace.id)"
>
<template #header>
<div class="flex justify-between">

View File

@@ -153,4 +153,7 @@ en:
welcome: 'Have fun with'
lastUsed: 'Last used Vaults'
sponsors: 'Supported by'
remove:
title: Delete Vault
description: Are you sure you want to delete {vaultName}?
</i18n>

View File

@@ -1,7 +1,6 @@
import { asc, eq } from 'drizzle-orm'
import { eq } from 'drizzle-orm'
import {
haexWorkspaces,
type InsertHaexWorkspaces,
type SelectHaexWorkspaces,
} from '~~/src-tauri/database/schemas'
import type { Swiper } from 'swiper/types'
@@ -33,18 +32,18 @@ export const useWorkspaceStore = defineStore('workspaceStore', () => {
}
try {
const items = await currentVault.value.drizzle
/* const items = await currentVault.value.drizzle
.select()
.from(haexWorkspaces)
.orderBy(asc(haexWorkspaces.position))
console.log('loadWorkspacesAsync', items)
workspaces.value = items
workspaces.value = items */
// Create default workspace if none exist
if (items.length === 0) {
await addWorkspaceAsync('Workspace 1')
}
/* if (items.length === 0) { */
await addWorkspaceAsync('Workspace 1')
/* } */
} catch (error) {
console.error('Error loading workspaces:', error)
throw error
@@ -62,12 +61,16 @@ export const useWorkspaceStore = defineStore('workspaceStore', () => {
try {
const newIndex = workspaces.value.length + 1
const newWorkspace: InsertHaexWorkspaces = {
const newWorkspace: SelectHaexWorkspaces = {
id: crypto.randomUUID(),
name: name || `Workspace ${newIndex}`,
position: workspaces.value.length,
haexTimestamp: '',
haexTombstone: false,
}
const result = await currentVault.value.drizzle
workspaces.value.push(newWorkspace)
currentWorkspaceIndex.value = workspaces.value.length - 1
/* const result = await currentVault.value.drizzle
.insert(haexWorkspaces)
.values(newWorkspace)
.returning()
@@ -76,7 +79,7 @@ export const useWorkspaceStore = defineStore('workspaceStore', () => {
workspaces.value.push(result[0])
currentWorkspaceIndex.value = workspaces.value.length - 1
return result[0]
}
} */
} catch (error) {
console.error('Error adding workspace:', error)
throw error
@@ -104,12 +107,27 @@ export const useWorkspaceStore = defineStore('workspaceStore', () => {
const index = workspaces.value.findIndex((ws) => ws.id === workspaceId)
if (index === -1) return
try {
await currentVault.value.drizzle
.delete(haexWorkspaces)
.where(eq(haexWorkspaces.id, workspaceId))
workspaces.value.splice(index, 1)
workspaces.value.forEach((workspace, index) => (workspace.position = index))
workspaces.value.splice(index, 1)
try {
/* await currentVault.value.drizzle.transaction(async (tx) => {
await tx
.delete(haexWorkspaces)
.where(eq(haexWorkspaces.id, workspaceId))
workspaces.value.splice(index, 1)
workspaces.value.forEach(
(workspace, index) => (workspace.position = index),
)
for (const workspace of workspaces.value) {
await tx
.update(haexWorkspaces)
.set({ position: index })
.where(eq(haexWorkspaces.position, workspace.position))
}
}) */
// Adjust current index if needed
if (currentWorkspaceIndex.value >= workspaces.value.length) {
@@ -121,10 +139,17 @@ export const useWorkspaceStore = defineStore('workspaceStore', () => {
}
}
const switchToWorkspace = (index: number) => {
if (index >= 0 && index < workspaces.value.length) {
currentWorkspaceIndex.value = index
const switchToWorkspace = (workspaceId?: string) => {
const workspace = workspaces.value.find((w) => w.id === workspaceId)
console.log('switchToWorkspace', workspace)
if (workspace) {
currentWorkspaceIndex.value = workspace?.position
} else {
currentWorkspaceIndex.value = 0
}
return currentWorkspaceIndex.value
}
const switchToNext = () => {
@@ -163,7 +188,8 @@ export const useWorkspaceStore = defineStore('workspaceStore', () => {
}
}
const slideToWorkspace = (index: number) => {
const slideToWorkspace = (workspaceId?: string) => {
const index = switchToWorkspace(workspaceId)
if (swiperInstance.value) {
swiperInstance.value.slideTo(index)
}