diff --git a/Database/crafting/__pycache__/id.cpython-311.pyc b/Database/crafting/__pycache__/id.cpython-311.pyc deleted file mode 100644 index 5794296d..00000000 Binary files a/Database/crafting/__pycache__/id.cpython-311.pyc and /dev/null differ diff --git a/Database/crafting/id.py b/Database/crafting/id.py deleted file mode 100644 index 37a7cd8f..00000000 --- a/Database/crafting/id.py +++ /dev/null @@ -1,10 +0,0 @@ -# starting_ids.py - -starting_ids = { - "ammo": 1000, - "food": 2000, - "gear": 3000, - "material": 4000, - "weapon":5000, - "scrap": 9000, -} diff --git a/Database/crafting/importData.py b/Database/crafting/importData.py deleted file mode 100644 index 6d64306d..00000000 --- a/Database/crafting/importData.py +++ /dev/null @@ -1,47 +0,0 @@ -import csv -import os -import sqlite3 -from id import starting_ids - -# Connect to the SQLite database -conn = sqlite3.connect("database.db") -cursor = conn.cursor() - -# Define the directory where the CSV files are located -csv_directory = "data" # Change this to your directory path - - -# Function to load data from a CSV file into a table -def load_csv_data(csv_path, table_name, cursor, starting_id): - # Delete existing data in the table - delete_query = f"DELETE FROM {table_name}" - cursor.execute(delete_query) - - with open(csv_path, newline="") as csvfile: - csv_reader = csv.reader(csvfile) - next(csv_reader) # Skip the header row if it exists in the CSV - for row in csv_reader: - # Exclude the first column (id) from the row - values = row[1:] - placeholders = ", ".join(["?"] * len(values)) - insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" - cursor.execute(insert_query, [starting_id] + values) - starting_id += 1 - - -# Get a list of CSV files in the data directory -csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] - -# Loop through the CSV files and load data into respective tables -for csv_file in csv_files: - table_name = os.path.splitext(csv_file)[ - 0 - ] # 
Remove the file extension to get the table name - csv_path = os.path.join(csv_directory, csv_file) - if table_name in starting_ids: - starting_id = starting_ids[table_name] - load_csv_data(csv_path, table_name, cursor, starting_id) - -# Commit the changes and close the connection -conn.commit() -conn.close() diff --git a/Database/crafting/main.py b/Database/crafting/main.py deleted file mode 100644 index d499cdb5..00000000 --- a/Database/crafting/main.py +++ /dev/null @@ -1,21 +0,0 @@ -import subprocess - -# Define the paths to your Python scripts -create_tables_script = "createTables.py" -import_data_script = "importData.py" - -# Run the createTables.py script -try: - subprocess.run(["python", create_tables_script], check=True) - print("createTables.py script executed successfully.") -except subprocess.CalledProcessError: - print("Error running createTables.py script.") - exit(1) - -# Run the importData.py script -try: - subprocess.run(["python", import_data_script], check=True) - print("importData.py script executed successfully.") -except subprocess.CalledProcessError: - print("Error running importData.py script.") - exit(1) diff --git a/Database/crafting/createTables.py b/Database/crafting/update.py similarity index 61% rename from Database/crafting/createTables.py rename to Database/crafting/update.py index 0660c275..39a27778 100644 --- a/Database/crafting/createTables.py +++ b/Database/crafting/update.py @@ -2,6 +2,17 @@ import sqlite3 import os import csv +# starting_ids.py + +starting_ids = { + "ammo": 1000, + "food": 2000, + "gear": 3000, + "material": 4000, + "weapon":5000, + "scrap": 9000, +} + def get_data_type(value): try: @@ -76,3 +87,47 @@ for csv_file in csv_files: # Commit the changes and close the connection conn.commit() conn.close() + + +# Connect to the SQLite database +conn = sqlite3.connect("database.db") +cursor = conn.cursor() + +# Define the directory where the CSV files are located +csv_directory = "data" # Change this to your directory 
path + + +# Function to load data from a CSV file into a table +def load_csv_data(csv_path, table_name, cursor, starting_id): + # Delete existing data in the table + delete_query = f"DELETE FROM {table_name}" + cursor.execute(delete_query) + + with open(csv_path, newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row if it exists in the CSV + for row in csv_reader: + # Exclude the first column (id) from the row + values = row[1:] + placeholders = ", ".join(["?"] * len(values)) + insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" + cursor.execute(insert_query, [starting_id] + values) + starting_id += 1 + + +# Get a list of CSV files in the data directory +csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] + +# Loop through the CSV files and load data into respective tables +for csv_file in csv_files: + table_name = os.path.splitext(csv_file)[ + 0 + ] # Remove the file extension to get the table name + csv_path = os.path.join(csv_directory, csv_file) + if table_name in starting_ids: + starting_id = starting_ids[table_name] + load_csv_data(csv_path, table_name, cursor, starting_id) + +# Commit the changes and close the connection +conn.commit() +conn.close() \ No newline at end of file diff --git a/Database/items/__pycache__/id.cpython-311.pyc b/Database/items/__pycache__/id.cpython-311.pyc deleted file mode 100644 index 4b3c3fdb..00000000 Binary files a/Database/items/__pycache__/id.cpython-311.pyc and /dev/null differ diff --git a/Database/items/id.py b/Database/items/id.py deleted file mode 100644 index 80233901..00000000 --- a/Database/items/id.py +++ /dev/null @@ -1,12 +0,0 @@ -# starting_ids.py -# items database - -starting_ids = { - "reload": 20000, - "ammo": 10000, - "craft": 20000, - "gear": 30000, - "part": 40000, - "med": 50000, - "food": 8000, -} diff --git a/Database/items/importData.py b/Database/items/importData.py deleted file mode 100644 index 6d64306d..00000000 --- 
a/Database/items/importData.py +++ /dev/null @@ -1,47 +0,0 @@ -import csv -import os -import sqlite3 -from id import starting_ids - -# Connect to the SQLite database -conn = sqlite3.connect("database.db") -cursor = conn.cursor() - -# Define the directory where the CSV files are located -csv_directory = "data" # Change this to your directory path - - -# Function to load data from a CSV file into a table -def load_csv_data(csv_path, table_name, cursor, starting_id): - # Delete existing data in the table - delete_query = f"DELETE FROM {table_name}" - cursor.execute(delete_query) - - with open(csv_path, newline="") as csvfile: - csv_reader = csv.reader(csvfile) - next(csv_reader) # Skip the header row if it exists in the CSV - for row in csv_reader: - # Exclude the first column (id) from the row - values = row[1:] - placeholders = ", ".join(["?"] * len(values)) - insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" - cursor.execute(insert_query, [starting_id] + values) - starting_id += 1 - - -# Get a list of CSV files in the data directory -csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] - -# Loop through the CSV files and load data into respective tables -for csv_file in csv_files: - table_name = os.path.splitext(csv_file)[ - 0 - ] # Remove the file extension to get the table name - csv_path = os.path.join(csv_directory, csv_file) - if table_name in starting_ids: - starting_id = starting_ids[table_name] - load_csv_data(csv_path, table_name, cursor, starting_id) - -# Commit the changes and close the connection -conn.commit() -conn.close() diff --git a/Database/items/main.py b/Database/items/main.py deleted file mode 100644 index d499cdb5..00000000 --- a/Database/items/main.py +++ /dev/null @@ -1,21 +0,0 @@ -import subprocess - -# Define the paths to your Python scripts -create_tables_script = "createTables.py" -import_data_script = "importData.py" - -# Run the createTables.py script -try: - subprocess.run(["python", 
create_tables_script], check=True) - print("createTables.py script executed successfully.") -except subprocess.CalledProcessError: - print("Error running createTables.py script.") - exit(1) - -# Run the importData.py script -try: - subprocess.run(["python", import_data_script], check=True) - print("importData.py script executed successfully.") -except subprocess.CalledProcessError: - print("Error running importData.py script.") - exit(1) diff --git a/Database/items/update.py b/Database/items/update.py new file mode 100644 index 00000000..f07f24ea --- /dev/null +++ b/Database/items/update.py @@ -0,0 +1,134 @@ +import sqlite3 +import os +import csv + +# starting_ids.py +# items database + +starting_ids = { + "reload": 20000, + "ammo": 10000, + "craft": 20000, + "gear": 30000, + "part": 40000, + "med": 50000, + "food": 8000, +} + +def get_data_type(value): + try: + int(value) + return "INTEGER" + except ValueError: + try: + float(value) + return "REAL" + except ValueError: + return "TEXT" + + +# Connect to the SQLite database and delete existing tables +conn = sqlite3.connect("database.db") +cursor = conn.cursor() + +# Get a list of CSV files in the "data" directory +data_dir = "data" # Change this to your data directory path +csv_files = [f for f in os.listdir(data_dir) if f.endswith(".csv")] + +# Drop all existing tables except for sqlite_sequence +cursor.execute("PRAGMA foreign_keys = OFF;") +cursor.execute( + "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_sequence';" +) +existing_tables = cursor.fetchall() +for table in existing_tables: + cursor.execute(f"DROP TABLE IF EXISTS {table[0]};") + +# Commit the changes to delete existing tables +conn.commit() + +# Iterate through CSV files and create new tables +for csv_file in csv_files: + table_name = os.path.splitext(csv_file)[0] + + # Read the first row of the CSV file to determine the column names + with open(os.path.join(data_dir, csv_file), newline="") as csvfile: + csv_reader = 
csv.reader(csvfile) + header = next(csv_reader) + + # Read the second row to determine data types + with open(os.path.join(data_dir, csv_file), newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row + data_row = next(csv_reader) + data_types = [get_data_type(value) for value in data_row] + + # Add a primary key column if needed (replace 'unique_id' with your unique identifier column name) + if "unique_id" in header: + header[header.index("unique_id")] += " PRIMARY KEY" + + # Generate the CREATE TABLE statement dynamically based on the column names and data types + create_table_sql = f"CREATE TABLE IF NOT EXISTS {table_name} (\n" + for column_name, data_type in zip(header, data_types): + create_table_sql += f" {column_name} {data_type},\n" + create_table_sql = create_table_sql.rstrip(",\n") + "\n);" + + # Execute the CREATE TABLE statement + cursor.execute(create_table_sql) + + # Read and insert data from the CSV file into the table + with open(os.path.join(data_dir, csv_file), newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row + for row in csv_reader: + placeholders = ",".join(["?"] * len(row)) + insert_sql = f"INSERT INTO {table_name} VALUES ({placeholders});" + cursor.execute(insert_sql, row) + +# Commit the changes and close the connection +conn.commit() +conn.close() + + +# Connect to the SQLite database +conn = sqlite3.connect("database.db") +cursor = conn.cursor() + +# Define the directory where the CSV files are located +csv_directory = "data" # Change this to your directory path + + +# Function to load data from a CSV file into a table +def load_csv_data(csv_path, table_name, cursor, starting_id): + # Delete existing data in the table + delete_query = f"DELETE FROM {table_name}" + cursor.execute(delete_query) + + with open(csv_path, newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row if it exists in the CSV + for row 
in csv_reader: + # Exclude the first column (id) from the row + values = row[1:] + placeholders = ", ".join(["?"] * len(values)) + insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" + cursor.execute(insert_query, [starting_id] + values) + starting_id += 1 + + +# Get a list of CSV files in the data directory +csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] + +# Loop through the CSV files and load data into respective tables +for csv_file in csv_files: + table_name = os.path.splitext(csv_file)[ + 0 + ] # Remove the file extension to get the table name + csv_path = os.path.join(csv_directory, csv_file) + if table_name in starting_ids: + starting_id = starting_ids[table_name] + load_csv_data(csv_path, table_name, cursor, starting_id) + +# Commit the changes and close the connection +conn.commit() +conn.close() \ No newline at end of file diff --git a/Database/progress/data/exp.csv b/Database/progress/data/exp.csv new file mode 100644 index 00000000..b1f7224b --- /dev/null +++ b/Database/progress/data/exp.csv @@ -0,0 +1,101 @@ +id,lv,xp +NULL,1,84 +NULL,2,174 +NULL,3,276 +NULL,4,388 +NULL,5,512 +NULL,6,650 +NULL,7,801 +NULL,8,969 +NULL,9,1154 +NULL,10,1358 +NULL,11,1584 +NULL,12,1833 +NULL,13,2107 +NULL,14,2411 +NULL,15,2746 +NULL,16,3115 +NULL,17,3523 +NULL,18,3973 +NULL,19,4470 +NULL,20,5018 +NULL,21,5624 +NULL,22,6291 +NULL,23,7028 +NULL,24,7842 +NULL,25,8740 +NULL,26,9730 +NULL,27,10824 +NULL,28,12031 +NULL,29,13363 +NULL,30,14833 +NULL,31,16456 +NULL,32,18247 +NULL,33,20224 +NULL,34,22406 +NULL,35,24815 +NULL,36,27473 +NULL,37,30408 +NULL,38,33648 +NULL,39,37224 +NULL,40,41171 +NULL,41,45529 +NULL,42,50339 +NULL,43,55649 +NULL,44,61512 +NULL,45,67983 +NULL,46,75127 +NULL,47,83014 +NULL,48,91721 +NULL,49,101333 +NULL,50,111945 +NULL,51,123660 +NULL,52,136594 +NULL,53,150872 +NULL,54,166636 +NULL,55,184040 +NULL,56,203254 +NULL,57,224466 +NULL,58,247886 +NULL,59,273742 +NULL,60,302288 +NULL,61,333804 +NULL,62,368599 
+NULL,63,407015 +NULL,64,449428 +NULL,65,496254 +NULL,66,547953 +NULL,67,605032 +NULL,68,668051 +NULL,69,737627 +NULL,70,814445 +NULL,71,899257 +NULL,72,992895 +NULL,73,1096278 +NULL,74,1210421 +NULL,75,1336443 +NULL,76,1475581 +NULL,77,1629200 +NULL,78,1798808 +NULL,79,1986068 +NULL,80,2192818 +NULL,81,2421087 +NULL,82,2673114 +NULL,83,2951373 +NULL,84,3258594 +NULL,85,3597792 +NULL,86,3972294 +NULL,87,4385776 +NULL,88,4842295 +NULL,89,5346332 +NULL,90,5902831 +NULL,91,6517253 +NULL,92,7195629 +NULL,93,7944614 +NULL,94,8771558 +NULL,95,9684577 +NULL,96,10692629 +NULL,97,11805606 +NULL,98,13034431 +NULL,99,14500000 +NULL,100,16500000 diff --git a/Database/progress/database.db b/Database/progress/database.db new file mode 100644 index 00000000..038a4def Binary files /dev/null and b/Database/progress/database.db differ diff --git a/Database/items/createTables.py b/Database/progress/update.py similarity index 62% rename from Database/items/createTables.py rename to Database/progress/update.py index 0660c275..63cdfb83 100644 --- a/Database/items/createTables.py +++ b/Database/progress/update.py @@ -2,6 +2,11 @@ import sqlite3 import os import csv +# starting_ids.py + +starting_ids = { + "exp": 1, +} def get_data_type(value): try: @@ -76,3 +81,47 @@ for csv_file in csv_files: # Commit the changes and close the connection conn.commit() conn.close() + + +# Connect to the SQLite database +conn = sqlite3.connect("database.db") +cursor = conn.cursor() + +# Define the directory where the CSV files are located +csv_directory = "data" # Change this to your directory path + + +# Function to load data from a CSV file into a table +def load_csv_data(csv_path, table_name, cursor, starting_id): + # Delete existing data in the table + delete_query = f"DELETE FROM {table_name}" + cursor.execute(delete_query) + + with open(csv_path, newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row if it exists in the CSV + for row in csv_reader: + # 
Exclude the first column (id) from the row + values = row[1:] + placeholders = ", ".join(["?"] * len(values)) + insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" + cursor.execute(insert_query, [starting_id] + values) + starting_id += 1 + + +# Get a list of CSV files in the data directory +csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] + +# Loop through the CSV files and load data into respective tables +for csv_file in csv_files: + table_name = os.path.splitext(csv_file)[ + 0 + ] # Remove the file extension to get the table name + csv_path = os.path.join(csv_directory, csv_file) + if table_name in starting_ids: + starting_id = starting_ids[table_name] + load_csv_data(csv_path, table_name, cursor, starting_id) + +# Commit the changes and close the connection +conn.commit() +conn.close() \ No newline at end of file diff --git a/Documents/crafting.md b/Documents/crafting.md index e68d67a2..c0aab749 100644 --- a/Documents/crafting.md +++ b/Documents/crafting.md @@ -10,6 +10,8 @@ ## Crafting +- recipe required to craft certain parts? similar to FO4? Some recipes are only acquired with a certain task completion? + ### Medicine - med crafting (lab) diff --git a/Documents/progress.md b/Documents/progress.md new file mode 100644 index 00000000..e69de29b diff --git a/README.md b/README.md index 3bf0791c..77d1e5a9 100644 --- a/README.md +++ b/README.md @@ -114,8 +114,9 @@ In the event that items despawn, the player can buy them back from an NPC for 3/ #### Progression +- ##### [Documentation](https://git.happytavern.co/OceanSlim/projectEli/src/branch/master/Documents/progress.md) - 1-10 prestiege system, - - once the last task is complete, players are given the option to restart, lose all progress and items and prestiege. + - once the last story task is complete, players are given the option to restart, lose all progress and items and prestiege.
#### Crafting - ##### [Documentation](https://git.happytavern.co/OceanSlim/projectEli/src/branch/master/Documents/crafting.md)