diff --git a/Database/__pycache__/id.cpython-311.pyc b/Database/__pycache__/id.cpython-311.pyc
deleted file mode 100644
index bd9c37d1..00000000
Binary files a/Database/__pycache__/id.cpython-311.pyc and /dev/null differ
diff --git a/Database/crafting/__pycache__/id.cpython-311.pyc b/Database/crafting/__pycache__/id.cpython-311.pyc
new file mode 100644
index 00000000..0fb00eec
Binary files /dev/null and b/Database/crafting/__pycache__/id.cpython-311.pyc differ
diff --git a/Database/createTables.py b/Database/crafting/createTables.py
similarity index 100%
rename from Database/createTables.py
rename to Database/crafting/createTables.py
diff --git a/Database/crafting/data/ammo.csv b/Database/crafting/data/ammo.csv
new file mode 100644
index 00000000..0690e534
--- /dev/null
+++ b/Database/crafting/data/ammo.csv
@@ -0,0 +1,2 @@
+id,result,input1,qty1,input2,qty2,input3,qty3,input4,qty4
+NULL,10000,20000,1,20002,1,20005,1,20001,15
diff --git a/Database/crafting/data/food.csv b/Database/crafting/data/food.csv
new file mode 100644
index 00000000..fcf80197
--- /dev/null
+++ b/Database/crafting/data/food.csv
@@ -0,0 +1,2 @@
+id,result,input1,qty1,input2,qty2,input3,qty3,input4,qty4
+NULL,10000,20000,1,20002,1,20005,1,20001,15
\ No newline at end of file
diff --git a/Database/crafting/data/gear.csv b/Database/crafting/data/gear.csv
new file mode 100644
index 00000000..fcf80197
--- /dev/null
+++ b/Database/crafting/data/gear.csv
@@ -0,0 +1,2 @@
+id,result,input1,qty1,input2,qty2,input3,qty3,input4,qty4
+NULL,10000,20000,1,20002,1,20005,1,20001,15
\ No newline at end of file
diff --git a/Database/crafting/data/material.csv b/Database/crafting/data/material.csv
new file mode 100644
index 00000000..fcf80197
--- /dev/null
+++ b/Database/crafting/data/material.csv
@@ -0,0 +1,2 @@
+id,result,input1,qty1,input2,qty2,input3,qty3,input4,qty4
+NULL,10000,20000,1,20002,1,20005,1,20001,15
\ No newline at end of file
diff --git a/Database/crafting/data/med.csv b/Database/crafting/data/med.csv
new file mode 100644
index 00000000..fcf80197
--- /dev/null
+++ b/Database/crafting/data/med.csv
@@ -0,0 +1,2 @@
+id,result,input1,qty1,input2,qty2,input3,qty3,input4,qty4
+NULL,10000,20000,1,20002,1,20005,1,20001,15
\ No newline at end of file
diff --git a/Database/crafting/data/scrap.csv b/Database/crafting/data/scrap.csv
new file mode 100644
index 00000000..fcf80197
--- /dev/null
+++ b/Database/crafting/data/scrap.csv
@@ -0,0 +1,2 @@
+id,result,input1,qty1,input2,qty2,input3,qty3,input4,qty4
+NULL,10000,20000,1,20002,1,20005,1,20001,15
\ No newline at end of file
diff --git a/Database/crafting/data/weapon.csv b/Database/crafting/data/weapon.csv
new file mode 100644
index 00000000..fcf80197
--- /dev/null
+++ b/Database/crafting/data/weapon.csv
@@ -0,0 +1,2 @@
+id,result,input1,qty1,input2,qty2,input3,qty3,input4,qty4
+NULL,10000,20000,1,20002,1,20005,1,20001,15
\ No newline at end of file
diff --git a/Database/crafting/database.db b/Database/crafting/database.db
new file mode 100644
index 00000000..5b384b2d
Binary files /dev/null and b/Database/crafting/database.db differ
diff --git a/Database/crafting/id.py b/Database/crafting/id.py
new file mode 100644
index 00000000..37a7cd8f
--- /dev/null
+++ b/Database/crafting/id.py
@@ -0,0 +1,10 @@
+# starting_ids.py
+
+starting_ids = {
+    "ammo": 1000,
+    "food": 2000,
+    "gear": 3000,
+    "material": 4000,
+    "weapon":5000,
+    "scrap": 9000,
+}
diff --git a/Database/importData.py b/Database/crafting/importData.py
similarity index 100%
rename from Database/importData.py
rename to Database/crafting/importData.py
diff --git a/Database/main.py b/Database/crafting/main.py
similarity index 100%
rename from Database/main.py
rename to Database/crafting/main.py
diff --git a/Database/data/med.csv b/Database/data/med.csv
deleted file mode 100644
index 6c9f96a7..00000000
--- a/Database/data/med.csv
+++ /dev/null
@@ -1,16 +0,0 @@
-id,type,name,rarity,weight,height,width,stack,value
-NULL,meds,Bandage,1,0.1411,1,1,5,6500
-NULL,meds,Small Health Kit (SHK),1,0.18,1,1,1,32000
-NULL,meds,Individual First Aid Kit,2,0.22046,1,1,1,60000
-NULL,meds,Dual First Aid Kit,2,0.44092,1,2,2,100000
-NULL,meds,Group First Aid Kit,3,0.88184,2,2,4,120000
-NULL,meds,Adaptive First Aid Kit,4,0.22046,1,1,1,90000
-NULL,meds,Surgery Kit,2,0.88185,1,1,2,100000
-NULL,meds,Surgery Kit (L),3,1.32278,1,2,3,150000
-NULL,meds,Adrenaline,3,0.22046,1,1,1,50000
-NULL,meds,Pain killer,1,0.05512,1,1,4,10000
-NULL,meds,Morphine,2,0.22046,1,1,1,40000
-NULL,meds,Caffeine (pill),2,0.05512,1,1,4,2500
-NULL,meds,Tourniquet,1,0.37371,1,1,2,11000
-NULL,meds,Muscle Stimulant,4,0,0,0,0,0
-NULL,meds,Rad-x,0,0,0,0,0,0
diff --git a/Database/items/__pycache__/id.cpython-311.pyc b/Database/items/__pycache__/id.cpython-311.pyc
new file mode 100644
index 00000000..0bf4de40
Binary files /dev/null and b/Database/items/__pycache__/id.cpython-311.pyc differ
diff --git a/Database/items/createTables.py b/Database/items/createTables.py
new file mode 100644
index 00000000..0660c275
--- /dev/null
+++ b/Database/items/createTables.py
@@ -0,0 +1,78 @@
+import sqlite3
+import os
+import csv
+
+
+def get_data_type(value):
+    try:
+        int(value)
+        return "INTEGER"
+    except ValueError:
+        try:
+            float(value)
+            return "REAL"
+        except ValueError:
+            return "TEXT"
+
+
+# Connect to the SQLite database and delete existing tables
+conn = sqlite3.connect("database.db")
+cursor = conn.cursor()
+
+# Get a list of CSV files in the "data" directory
+data_dir = "data"  # Change this to your data directory path
+csv_files = [f for f in os.listdir(data_dir) if f.endswith(".csv")]
+
+# Drop all existing tables except for sqlite_sequence
+cursor.execute("PRAGMA foreign_keys = OFF;")
+cursor.execute(
+    "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_sequence';"
+)
+existing_tables = cursor.fetchall()
+for table in existing_tables:
+    cursor.execute(f"DROP TABLE IF EXISTS {table[0]};")
+
+# Commit the changes to delete existing tables
+conn.commit()
+
+# Iterate through CSV files and create new tables
+for csv_file in csv_files:
+    table_name = os.path.splitext(csv_file)[0]
+
+    # Read the first row of the CSV file to determine the column names
+    with open(os.path.join(data_dir, csv_file), newline="") as csvfile:
+        csv_reader = csv.reader(csvfile)
+        header = next(csv_reader)
+
+    # Read the second row to determine data types
+    with open(os.path.join(data_dir, csv_file), newline="") as csvfile:
+        csv_reader = csv.reader(csvfile)
+        next(csv_reader)  # Skip the header row
+        data_row = next(csv_reader)
+        data_types = [get_data_type(value) for value in data_row]
+
+    # Add a primary key column if needed (replace 'unique_id' with your unique identifier column name)
+    if "unique_id" in header:
+        header[header.index("unique_id")] += " PRIMARY KEY"
+
+    # Generate the CREATE TABLE statement dynamically based on the column names and data types
+    create_table_sql = f"CREATE TABLE IF NOT EXISTS {table_name} (\n"
+    for column_name, data_type in zip(header, data_types):
+        create_table_sql += f"    {column_name} {data_type},\n"
+    create_table_sql = create_table_sql.rstrip(",\n") + "\n);"
+
+    # Execute the CREATE TABLE statement
+    cursor.execute(create_table_sql)
+
+    # Read and insert data from the CSV file into the table
+    with open(os.path.join(data_dir, csv_file), newline="") as csvfile:
+        csv_reader = csv.reader(csvfile)
+        next(csv_reader)  # Skip the header row
+        for row in csv_reader:
+            placeholders = ",".join(["?"] * len(row))
+            insert_sql = f"INSERT INTO {table_name} VALUES ({placeholders});"
+            cursor.execute(insert_sql, row)
+
+# Commit the changes and close the connection
+conn.commit()
+conn.close()
diff --git a/Database/data/ammo.csv b/Database/items/data/ammo.csv
similarity index 100%
rename from Database/data/ammo.csv
rename to Database/items/data/ammo.csv
diff --git a/Database/data/craft.csv b/Database/items/data/craft.csv
similarity index 100%
rename from Database/data/craft.csv
rename to Database/items/data/craft.csv
diff --git a/Database/data/gear.csv b/Database/items/data/gear.csv
similarity index 100%
rename from Database/data/gear.csv
rename to Database/items/data/gear.csv
diff --git a/Database/items/data/med.csv b/Database/items/data/med.csv
new file mode 100644
index 00000000..d8a18411
--- /dev/null
+++ b/Database/items/data/med.csv
@@ -0,0 +1,16 @@
+id,type,name,rarity,weight,height,width,stack,value
+NULL,bleed1,Bandage,1,0.1411,1,1,5,6500
+NULL,health,Small Health Kit (SHK),1,0.18,1,1,1,32000
+NULL,health,Individual First Aid Kit,2,0.22046,1,1,1,60000
+NULL,health,Dual First Aid Kit,2,0.44092,1,2,2,100000
+NULL,health,Group First Aid Kit,3,0.88184,2,2,4,120000
+NULL,health,Adaptive First Aid Kit,4,0.22046,1,1,1,90000
+NULL,surgery,Surgery Kit,2,0.88185,1,1,2,100000
+NULL,surgery,Surgery Kit (L),3,1.32278,1,2,3,150000
+NULL,stim,Adrenaline,3,0.22046,1,1,1,50000
+NULL,stim,Pain killer,1,0.05512,1,1,4,10000
+NULL,stim,Morphine,2,0.22046,1,1,1,40000
+NULL,stim,Caffeine (pill),2,0.05512,1,1,4,2500
+NULL,bleed2,Tourniquet,1,0.37371,1,1,2,11000
+NULL,stim,Muscle Stimulant,4,0,0,0,0,0
+NULL,stim,Rad-x,0,0,0,0,0,0
diff --git a/Database/data/part.csv b/Database/items/data/part.csv
similarity index 100%
rename from Database/data/part.csv
rename to Database/items/data/part.csv
diff --git a/Database/data/reload.csv b/Database/items/data/reload.csv
similarity index 100%
rename from Database/data/reload.csv
rename to Database/items/data/reload.csv
diff --git a/Database/data/weapon.csv b/Database/items/data/weapon.csv
similarity index 100%
rename from Database/data/weapon.csv
rename to Database/items/data/weapon.csv
diff --git a/Database/database.db b/Database/items/database.db
similarity index 87%
rename from Database/database.db
rename to Database/items/database.db
index 35a50d64..7b634c5a 100644
Binary files a/Database/database.db and b/Database/items/database.db differ
diff --git a/Database/id.py b/Database/items/id.py
similarity index 62%
rename from Database/id.py
rename to Database/items/id.py
index db108b36..3454a3f7 100644
--- a/Database/id.py
+++ b/Database/items/id.py
@@ -1,9 +1,10 @@
 # starting_ids.py
 
 starting_ids = {
-    "reloading": 20000,
+    "reload": 20000,
     "ammo": 10000,
     "craft": 20000,
     "gear": 30000,
-    "parts": 40000,
+    "part": 40000,
+    "med":50000,
 }
diff --git a/Database/items/importData.py b/Database/items/importData.py
new file mode 100644
index 00000000..6d64306d
--- /dev/null
+++ b/Database/items/importData.py
@@ -0,0 +1,47 @@
+import csv
+import os
+import sqlite3
+from id import starting_ids
+
+# Connect to the SQLite database
+conn = sqlite3.connect("database.db")
+cursor = conn.cursor()
+
+# Define the directory where the CSV files are located
+csv_directory = "data"  # Change this to your directory path
+
+
+# Function to load data from a CSV file into a table
+def load_csv_data(csv_path, table_name, cursor, starting_id):
+    # Delete existing data in the table
+    delete_query = f"DELETE FROM {table_name}"
+    cursor.execute(delete_query)
+
+    with open(csv_path, newline="") as csvfile:
+        csv_reader = csv.reader(csvfile)
+        next(csv_reader)  # Skip the header row if it exists in the CSV
+        for row in csv_reader:
+            # Exclude the first column (id) from the row
+            values = row[1:]
+            placeholders = ", ".join(["?"] * len(values))
+            insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})"
+            cursor.execute(insert_query, [starting_id] + values)
+            starting_id += 1
+
+
+# Get a list of CSV files in the data directory
+csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")]
+
+# Loop through the CSV files and load data into respective tables
+for csv_file in csv_files:
+    table_name = os.path.splitext(csv_file)[
+        0
+    ]  # Remove the file extension to get the table name
+    csv_path = os.path.join(csv_directory, csv_file)
+    if table_name in starting_ids:
+        starting_id = starting_ids[table_name]
+        load_csv_data(csv_path, table_name, cursor, starting_id)
+
+# Commit the changes and close the connection
+conn.commit()
+conn.close()
diff --git a/Database/items/main.py b/Database/items/main.py
new file mode 100644
index 00000000..d499cdb5
--- /dev/null
+++ b/Database/items/main.py
@@ -0,0 +1,21 @@
+import subprocess
+
+# Define the paths to your Python scripts
+create_tables_script = "createTables.py"
+import_data_script = "importData.py"
+
+# Run the createTables.py script
+try:
+    subprocess.run(["python", create_tables_script], check=True)
+    print("createTables.py script executed successfully.")
+except subprocess.CalledProcessError:
+    print("Error running createTables.py script.")
+    exit(1)
+
+# Run the importData.py script
+try:
+    subprocess.run(["python", import_data_script], check=True)
+    print("importData.py script executed successfully.")
+except subprocess.CalledProcessError:
+    print("Error running importData.py script.")
+    exit(1)
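For reference, the sketch below is not part of the patch. It shows one way to spot-check the Database/items pipeline after running main.py from Database/items. The file name verify_import.py and the selected columns are illustrative assumptions; the only facts it relies on are that createTables.py names each table after its CSV file and that importData.py assigns med ids sequentially from 50000, per the updated items/id.py.

# verify_import.py -- hypothetical helper, not part of this patch.
# Assumes it is run from Database/items after `python main.py` has completed.
import sqlite3

conn = sqlite3.connect("database.db")  # the same file the scripts above write to
cursor = conn.cursor()

# med rows should carry sequential ids beginning at starting_ids["med"], i.e. 50000.
cursor.execute("SELECT id, type, name, value FROM med ORDER BY id LIMIT 5")
for row in cursor.fetchall():
    print(row)

conn.close()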
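Similarly, the new crafting CSVs all share the header id,result,input1,qty1,...,input4,qty4. The following minimal sketch reads a recipe row into a result id plus (input, quantity) pairs, assuming that layout and NULL placeholders for unused slots; the helper name read_recipes is an assumption, not something the patch defines.

# read_recipes.py -- illustrative only; column names follow the CSV header above.
import csv

def read_recipes(csv_path):
    """Yield (result_id, [(input_id, qty), ...]) for each recipe row."""
    with open(csv_path, newline="") as csvfile:
        for row in csv.DictReader(csvfile):
            inputs = []
            for i in range(1, 5):  # input1..input4 paired with qty1..qty4
                item, qty = row[f"input{i}"], row[f"qty{i}"]
                if item and item != "NULL":
                    inputs.append((int(item), int(qty)))
            yield int(row["result"]), inputs

# Example: run from Database/crafting to print the sample ammo recipe.
for result, inputs in read_recipes("data/ammo.csv"):
    print(result, inputs)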