From a83520be2dc66a2b83a0fc9df52480f4b4ecfbd4 Mon Sep 17 00:00:00 2001 From: 0ceanSlim Date: Thu, 12 Oct 2023 08:35:36 -0400 Subject: [PATCH] added progress db, updated database update logic for all dbs --- .../crafting/__pycache__/id.cpython-311.pyc | Bin 281 -> 0 bytes Database/crafting/id.py | 10 -- Database/crafting/importData.py | 47 ------ Database/crafting/main.py | 21 --- .../crafting/{createTables.py => update.py} | 55 +++++++ Database/items/__pycache__/id.cpython-311.pyc | Bin 293 -> 0 bytes Database/items/id.py | 12 -- Database/items/importData.py | 47 ------ Database/items/main.py | 21 --- Database/items/update.py | 134 ++++++++++++++++++ Database/progress/data/exp.csv | 101 +++++++++++++ Database/progress/database.db | Bin 0 -> 32768 bytes .../createTables.py => progress/update.py} | 49 +++++++ Documents/crafting.md | 2 + Documents/progress.md | 0 README.md | 3 +- 16 files changed, 343 insertions(+), 159 deletions(-) delete mode 100644 Database/crafting/__pycache__/id.cpython-311.pyc delete mode 100644 Database/crafting/id.py delete mode 100644 Database/crafting/importData.py delete mode 100644 Database/crafting/main.py rename Database/crafting/{createTables.py => update.py} (61%) delete mode 100644 Database/items/__pycache__/id.cpython-311.pyc delete mode 100644 Database/items/id.py delete mode 100644 Database/items/importData.py delete mode 100644 Database/items/main.py create mode 100644 Database/items/update.py create mode 100644 Database/progress/data/exp.csv create mode 100644 Database/progress/database.db rename Database/{items/createTables.py => progress/update.py} (62%) create mode 100644 Documents/progress.md diff --git a/Database/crafting/__pycache__/id.cpython-311.pyc b/Database/crafting/__pycache__/id.cpython-311.pyc deleted file mode 100644 index 5794296dd0bc646206fa7f2cabb242acdee7c058..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 281 zcmZ3^%ge<81gm0xrz`@}k3k$5V23h3O8^MuthPXum>|} za%8?>W?;y?092j1gBwUJ;0F>N!azbpnSnu*?G{U7Zf^c9mbCo*lv^z6sfk6mIC2w9 zQj0PZb8fMfrzRHU=iOp0PA*C;@Y7_x#Zz38SX7dkmmZ&)QoNGkGsqFYw4AMCobyvs zV+xA$vr>~wTyrvGToOwXlM;(lV}Lr-KpJ8)Q}hZde{tC4=BJeAq}mnn0JSm#aj_h};0Oc?Cc4vI{VMU;q*gJRn#E3K0MS!cBDm diff --git a/Database/crafting/id.py b/Database/crafting/id.py deleted file mode 100644 index 37a7cd8f..00000000 --- a/Database/crafting/id.py +++ /dev/null @@ -1,10 +0,0 @@ -# starting_ids.py - -starting_ids = { - "ammo": 1000, - "food": 2000, - "gear": 3000, - "material": 4000, - "weapon":5000, - "scrap": 9000, -} diff --git a/Database/crafting/importData.py b/Database/crafting/importData.py deleted file mode 100644 index 6d64306d..00000000 --- a/Database/crafting/importData.py +++ /dev/null @@ -1,47 +0,0 @@ -import csv -import os -import sqlite3 -from id import starting_ids - -# Connect to the SQLite database -conn = sqlite3.connect("database.db") -cursor = conn.cursor() - -# Define the directory where the CSV files are located -csv_directory = "data" # Change this to your directory path - - -# Function to load data from a CSV file into a table -def load_csv_data(csv_path, table_name, cursor, starting_id): - # Delete existing data in the table - delete_query = f"DELETE FROM {table_name}" - cursor.execute(delete_query) - - with open(csv_path, newline="") as csvfile: - csv_reader = csv.reader(csvfile) - next(csv_reader) # Skip the header row if it exists in the CSV - for row in csv_reader: - # Exclude the first column (id) from the row - values = row[1:] - placeholders = ", ".join(["?"] * len(values)) - 
insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" - cursor.execute(insert_query, [starting_id] + values) - starting_id += 1 - - -# Get a list of CSV files in the data directory -csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] - -# Loop through the CSV files and load data into respective tables -for csv_file in csv_files: - table_name = os.path.splitext(csv_file)[ - 0 - ] # Remove the file extension to get the table name - csv_path = os.path.join(csv_directory, csv_file) - if table_name in starting_ids: - starting_id = starting_ids[table_name] - load_csv_data(csv_path, table_name, cursor, starting_id) - -# Commit the changes and close the connection -conn.commit() -conn.close() diff --git a/Database/crafting/main.py b/Database/crafting/main.py deleted file mode 100644 index d499cdb5..00000000 --- a/Database/crafting/main.py +++ /dev/null @@ -1,21 +0,0 @@ -import subprocess - -# Define the paths to your Python scripts -create_tables_script = "createTables.py" -import_data_script = "importData.py" - -# Run the createTables.py script -try: - subprocess.run(["python", create_tables_script], check=True) - print("createTables.py script executed successfully.") -except subprocess.CalledProcessError: - print("Error running createTables.py script.") - exit(1) - -# Run the importData.py script -try: - subprocess.run(["python", import_data_script], check=True) - print("importData.py script executed successfully.") -except subprocess.CalledProcessError: - print("Error running importData.py script.") - exit(1) diff --git a/Database/crafting/createTables.py b/Database/crafting/update.py similarity index 61% rename from Database/crafting/createTables.py rename to Database/crafting/update.py index 0660c275..39a27778 100644 --- a/Database/crafting/createTables.py +++ b/Database/crafting/update.py @@ -2,6 +2,17 @@ import sqlite3 import os import csv +# starting_ids.py + +starting_ids = { + "ammo": 1000, + "food": 2000, + "gear": 3000, + "material": 4000, + "weapon":5000, + "scrap": 9000, +} + def get_data_type(value): try: @@ -76,3 +87,47 @@ for csv_file in csv_files: # Commit the changes and close the connection conn.commit() conn.close() + + +# Connect to the SQLite database +conn = sqlite3.connect("database.db") +cursor = conn.cursor() + +# Define the directory where the CSV files are located +csv_directory = "data" # Change this to your directory path + + +# Function to load data from a CSV file into a table +def load_csv_data(csv_path, table_name, cursor, starting_id): + # Delete existing data in the table + delete_query = f"DELETE FROM {table_name}" + cursor.execute(delete_query) + + with open(csv_path, newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row if it exists in the CSV + for row in csv_reader: + # Exclude the first column (id) from the row + values = row[1:] + placeholders = ", ".join(["?"] * len(values)) + insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" + cursor.execute(insert_query, [starting_id] + values) + starting_id += 1 + + +# Get a list of CSV files in the data directory +csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] + +# Loop through the CSV files and load data into respective tables +for csv_file in csv_files: + table_name = os.path.splitext(csv_file)[ + 0 + ] # Remove the file extension to get the table name + csv_path = os.path.join(csv_directory, csv_file) + if table_name in starting_ids: + starting_id = starting_ids[table_name] + 
load_csv_data(csv_path, table_name, cursor, starting_id) + +# Commit the changes and close the connection +conn.commit() +conn.close() \ No newline at end of file diff --git a/Database/items/__pycache__/id.cpython-311.pyc b/Database/items/__pycache__/id.cpython-311.pyc deleted file mode 100644 index 4b3c3fdb87f6a3af9f8c8bfd1832eba53fe79733..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 293 zcmZ3^%ge<81lyZ`r#mPX)@m8DJ}sDX6B{GXQmXdWcUnn$1ekCs~G3}l+>7j zqWrAXC%vKX5Vdi`?K5yum93B7ulo;0BMt2YwC#rVk82qJakli@1Rr0Ni>}kN^Mx diff --git a/Database/items/id.py b/Database/items/id.py deleted file mode 100644 index 80233901..00000000 --- a/Database/items/id.py +++ /dev/null @@ -1,12 +0,0 @@ -# starting_ids.py -# items database - -starting_ids = { - "reload": 20000, - "ammo": 10000, - "craft": 20000, - "gear": 30000, - "part": 40000, - "med": 50000, - "food": 8000, -} diff --git a/Database/items/importData.py b/Database/items/importData.py deleted file mode 100644 index 6d64306d..00000000 --- a/Database/items/importData.py +++ /dev/null @@ -1,47 +0,0 @@ -import csv -import os -import sqlite3 -from id import starting_ids - -# Connect to the SQLite database -conn = sqlite3.connect("database.db") -cursor = conn.cursor() - -# Define the directory where the CSV files are located -csv_directory = "data" # Change this to your directory path - - -# Function to load data from a CSV file into a table -def load_csv_data(csv_path, table_name, cursor, starting_id): - # Delete existing data in the table - delete_query = f"DELETE FROM {table_name}" - cursor.execute(delete_query) - - with open(csv_path, newline="") as csvfile: - csv_reader = csv.reader(csvfile) - next(csv_reader) # Skip the header row if it exists in the CSV - for row in csv_reader: - # Exclude the first column (id) from the row - values = row[1:] - placeholders = ", ".join(["?"] * len(values)) - insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" - cursor.execute(insert_query, [starting_id] + values) - starting_id += 1 - - -# Get a list of CSV files in the data directory -csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] - -# Loop through the CSV files and load data into respective tables -for csv_file in csv_files: - table_name = os.path.splitext(csv_file)[ - 0 - ] # Remove the file extension to get the table name - csv_path = os.path.join(csv_directory, csv_file) - if table_name in starting_ids: - starting_id = starting_ids[table_name] - load_csv_data(csv_path, table_name, cursor, starting_id) - -# Commit the changes and close the connection -conn.commit() -conn.close() diff --git a/Database/items/main.py b/Database/items/main.py deleted file mode 100644 index d499cdb5..00000000 --- a/Database/items/main.py +++ /dev/null @@ -1,21 +0,0 @@ -import subprocess - -# Define the paths to your Python scripts -create_tables_script = "createTables.py" -import_data_script = "importData.py" - -# Run the createTables.py script -try: - subprocess.run(["python", create_tables_script], check=True) - print("createTables.py script executed successfully.") -except subprocess.CalledProcessError: - print("Error running createTables.py script.") - exit(1) - -# Run the importData.py script -try: - subprocess.run(["python", import_data_script], check=True) - print("importData.py script executed successfully.") -except subprocess.CalledProcessError: - print("Error running importData.py script.") - exit(1) diff --git a/Database/items/update.py b/Database/items/update.py new file mode 100644 index 
00000000..f07f24ea --- /dev/null +++ b/Database/items/update.py @@ -0,0 +1,134 @@ +import sqlite3 +import os +import csv + +# starting_ids.py +# items database + +starting_ids = { + "reload": 20000, + "ammo": 10000, + "craft": 20000, + "gear": 30000, + "part": 40000, + "med": 50000, + "food": 8000, +} + +def get_data_type(value): + try: + int(value) + return "INTEGER" + except ValueError: + try: + float(value) + return "REAL" + except ValueError: + return "TEXT" + + +# Connect to the SQLite database and delete existing tables +conn = sqlite3.connect("database.db") +cursor = conn.cursor() + +# Get a list of CSV files in the "data" directory +data_dir = "data" # Change this to your data directory path +csv_files = [f for f in os.listdir(data_dir) if f.endswith(".csv")] + +# Drop all existing tables except for sqlite_sequence +cursor.execute("PRAGMA foreign_keys = OFF;") +cursor.execute( + "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_sequence';" +) +existing_tables = cursor.fetchall() +for table in existing_tables: + cursor.execute(f"DROP TABLE IF EXISTS {table[0]};") + +# Commit the changes to delete existing tables +conn.commit() + +# Iterate through CSV files and create new tables +for csv_file in csv_files: + table_name = os.path.splitext(csv_file)[0] + + # Read the first row of the CSV file to determine the column names + with open(os.path.join(data_dir, csv_file), newline="") as csvfile: + csv_reader = csv.reader(csvfile) + header = next(csv_reader) + + # Read the second row to determine data types + with open(os.path.join(data_dir, csv_file), newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row + data_row = next(csv_reader) + data_types = [get_data_type(value) for value in data_row] + + # Add a primary key column if needed (replace 'unique_id' with your unique identifier column name) + if "unique_id" in header: + header[header.index("unique_id")] += " PRIMARY KEY" + + # Generate the CREATE TABLE statement dynamically based on the column names and data types + create_table_sql = f"CREATE TABLE IF NOT EXISTS {table_name} (\n" + for column_name, data_type in zip(header, data_types): + create_table_sql += f" {column_name} {data_type},\n" + create_table_sql = create_table_sql.rstrip(",\n") + "\n);" + + # Execute the CREATE TABLE statement + cursor.execute(create_table_sql) + + # Read and insert data from the CSV file into the table + with open(os.path.join(data_dir, csv_file), newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row + for row in csv_reader: + placeholders = ",".join(["?"] * len(row)) + insert_sql = f"INSERT INTO {table_name} VALUES ({placeholders});" + cursor.execute(insert_sql, row) + +# Commit the changes and close the connection +conn.commit() +conn.close() + + +# Connect to the SQLite database +conn = sqlite3.connect("database.db") +cursor = conn.cursor() + +# Define the directory where the CSV files are located +csv_directory = "data" # Change this to your directory path + + +# Function to load data from a CSV file into a table +def load_csv_data(csv_path, table_name, cursor, starting_id): + # Delete existing data in the table + delete_query = f"DELETE FROM {table_name}" + cursor.execute(delete_query) + + with open(csv_path, newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row if it exists in the CSV + for row in csv_reader: + # Exclude the first column (id) from the row + values = row[1:] + 
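+            # drop the CSV's own id column (row[0]); the execute below binds
+            # a sequential id counted up from this table's starting_id instead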
placeholders = ", ".join(["?"] * len(values)) + insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" + cursor.execute(insert_query, [starting_id] + values) + starting_id += 1 + + +# Get a list of CSV files in the data directory +csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] + +# Loop through the CSV files and load data into respective tables +for csv_file in csv_files: + table_name = os.path.splitext(csv_file)[ + 0 + ] # Remove the file extension to get the table name + csv_path = os.path.join(csv_directory, csv_file) + if table_name in starting_ids: + starting_id = starting_ids[table_name] + load_csv_data(csv_path, table_name, cursor, starting_id) + +# Commit the changes and close the connection +conn.commit() +conn.close() \ No newline at end of file diff --git a/Database/progress/data/exp.csv b/Database/progress/data/exp.csv new file mode 100644 index 00000000..b1f7224b --- /dev/null +++ b/Database/progress/data/exp.csv @@ -0,0 +1,101 @@ +id,lv,xp +NULL,1,84 +NULL,2,174 +NULL,3,276 +NULL,4,388 +NULL,5,512 +NULL,6,650 +NULL,7,801 +NULL,8,969 +NULL,9,1154 +NULL,10,1358 +NULL,11,1584 +NULL,12,1833 +NULL,13,2107 +NULL,14,2411 +NULL,15,2746 +NULL,16,3115 +NULL,17,3523 +NULL,18,3973 +NULL,19,4470 +NULL,20,5018 +NULL,21,5624 +NULL,22,6291 +NULL,23,7028 +NULL,24,7842 +NULL,25,8740 +NULL,26,9730 +NULL,27,10824 +NULL,28,12031 +NULL,29,13363 +NULL,30,14833 +NULL,31,16456 +NULL,32,18247 +NULL,33,20224 +NULL,34,22406 +NULL,35,24815 +NULL,36,27473 +NULL,37,30408 +NULL,38,33648 +NULL,39,37224 +NULL,40,41171 +NULL,41,45529 +NULL,42,50339 +NULL,43,55649 +NULL,44,61512 +NULL,45,67983 +NULL,46,75127 +NULL,47,83014 +NULL,48,91721 +NULL,49,101333 +NULL,50,111945 +NULL,51,123660 +NULL,52,136594 +NULL,53,150872 +NULL,54,166636 +NULL,55,184040 +NULL,56,203254 +NULL,57,224466 +NULL,58,247886 +NULL,59,273742 +NULL,60,302288 +NULL,61,333804 +NULL,62,368599 +NULL,63,407015 +NULL,64,449428 +NULL,65,496254 +NULL,66,547953 +NULL,67,605032 +NULL,68,668051 +NULL,69,737627 +NULL,70,814445 +NULL,71,899257 +NULL,72,992895 +NULL,73,1096278 +NULL,74,1210421 +NULL,75,1336443 +NULL,76,1475581 +NULL,77,1629200 +NULL,78,1798808 +NULL,79,1986068 +NULL,80,2192818 +NULL,81,2421087 +NULL,82,2673114 +NULL,83,2951373 +NULL,84,3258594 +NULL,85,3597792 +NULL,86,3972294 +NULL,87,4385776 +NULL,88,4842295 +NULL,89,5346332 +NULL,90,5902831 +NULL,91,6517253 +NULL,92,7195629 +NULL,93,7944614 +NULL,94,8771558 +NULL,95,9684577 +NULL,96,10692629 +NULL,97,11805606 +NULL,98,13034431 +NULL,99,14500000 +NULL,100,16500000 diff --git a/Database/progress/database.db b/Database/progress/database.db new file mode 100644 index 0000000000000000000000000000000000000000..038a4defe905c8bb39caeca08cb0aa1dbeb3fddf GIT binary patch literal 32768 zcmeI4du&tJ8Nlzgoy6zdS3+VO6GCqwfrO{8-{zSIAxWEwHH(5f;qHmR%Dv3Bbww1-%`PO1uQl}1;pO%u{Kb?1DJM;r)$ z^pA;5-%U>L?>qOL!}tAs?dZnl4)k>=bC!N6of$K8x?d<4L{VtgbwLm=vgTyX&RQvJ zH(7JAR@lS>JYauOSo^uF3L7A57XX&Y)dBe zc>7>{H3S(eJDJMqU9ot3M|*$1FF8IjnKO#}4(E=~>VZ+ddq&Obfl>Z@M$PJhQGt6# z{kMlU2|~$9x7#gFH9@J_k&HP3Kb&`UutK40e%I-E!ChG?PBlQCV^)HFmlf5>I_7tU z_8+=yYsD!mU!j?^GD$PVt|(w%T3qXN?*j_HdoE9(w|6_{JZX!D-2aR%8seYvjF~=H zi;f3o9M7fGDPvabn-%BR<7c()yKa|DoLT`c9=FU4ZzV+zc+Ugsp9fD}@#iy&Jd{o+ zcykmL$$RE^k;2UxMVez{Y2NHbMe?5iQIP{;8GjjhMu8K{IyS++XZ+@1FO{t0ez~NM z6u%6!QPXD_ZQ|GF3F(-i-O>K4-O%3Eu4-4bA8D^>FKaJor?oF=Pilv?L)u=gS8LbS zYwNUn&7-NBUA?1zq<)}YQ{Pr!SHG`bP|vGhRZpo;t2uR4-KX}dTh)lVTGiDm)v5eT zxvkt#ey_Z({8)Kad09E9Jg=NkjwvZ+NQo<*%0|Vj)GO7BtO)YoXdpWAk}sxDXBtSFXRNn1#?3US0N* 
zGE@kMgcloK%E3Z-P&jkNR1Orv1H#E`f0FmJ(9Zv%{l}v($@>b?zOi3?B=0Rmdq*-Q z@}5GpXHUh)^6o;kyYK8`d9V--c3lt4y9&{+*1z1Acd|%MI4VVl;q^i}(uku%bVQ?2 zj5G{E1WJ)i2*OZ^Gz38i%8(915QHM60}uqD1iGIE_Nbp1K=%Rrc=~iNu)&k3dw{(> zb-J5#B+3(~gTN7Kv*U!y(1;hyUE%4s)nsC(hm zzv*MZ!N;l+pQn!k2OeE;EJ(Y6{axjkchXK^U#GVGY1#p7bSRg8K(_&Vx5;B;w4HM( z+Ag*1rCWg`Td8q|wgHFR$c9nc3LI*6?mJ9dfP*b%r=OzDz=7t{Kb)mofc;xagMX%* zfqk1xzHyCi0yZ|4xNp&oz}}6Hb64pG&cWyg$C?;j4;)!+Fl_tPhcpBn2-!~EqCsGP(AG0Z1Hit3&2^gkfepWSU8X)@uTQ+#MGek@s3Cso zchn0U@rsAK=sMu=I&teZx)wOJR%|#**8m6Ch~&$3HE>|HaJ!SP0`{*G-ZAM)VBbpN zjTdPXu+b!Z?;Y9*>}?dz|AwyM?2oPxzA{Re14ot%Qxl|tvCSWDIPxZ`2MpCe(MOg6 z2A2(eOqK!$mhO6v)B*bIwhKfD^ywWP*aa1kk%A`U$D!=!@0{{A4j;WbxWh zNey7QW@RT?1Q=Sh+(tZrLC?~9QVkfWUh+-i2K2jYa%3T(Z(;Qr;sP{Wt~;a(&|6jc zbyCUEh*nk{AqxN_3$&A@0x(>m{DhPPhRWqtL<0LnD=OG{!-;?P7}lsKMsIypt0_P3m6oWf=HI%g@TP^s;dvxHNy#3nc$oB|H< zgwxK+Zx^eaHcmd9@SRf8p%KxDPycc#VSE1GKHu~A`pa13@BQ0Y!wn6f0W^RH&;S}h z184vZpaC?12G9T+mRPRoBb|AwI5mfCkV28bAYR01co4G=K)s02)98 zXkZ>2D7yal-+TShe^Q8sf@t0k1dQKRG;XWws|%y(u%BOOEh+SaOI`JoPW zbHeOE&azTAw#e+616EE?AJVraQ&zUs8Xpo=3TMm-*7fn@54l`-1V6BBGVGwn{h>T2>3H>u^*VF6#OnM!imObnABAI(^10vxuaCdZQF zlVe3I6jp1osV-Lysh#og8NBl8N3JC?YGsp0titg7?Kg?+5oEQ@Si;+yF|%1NC!hTN zrWN||^ZyUi(;0I?184vZpaC?12G9T+Km%w14WI!u@YxL%z5mZI{PF+)e|DKT6AhpN hG=K)s02)98XaEhM0W^RH(7;1#py>KPTKxI{{{W-Y!P)=- literal 0 HcmV?d00001 diff --git a/Database/items/createTables.py b/Database/progress/update.py similarity index 62% rename from Database/items/createTables.py rename to Database/progress/update.py index 0660c275..63cdfb83 100644 --- a/Database/items/createTables.py +++ b/Database/progress/update.py @@ -2,6 +2,11 @@ import sqlite3 import os import csv +# starting_ids.py + +starting_ids = { + "exp": 1, +} def get_data_type(value): try: @@ -76,3 +81,47 @@ for csv_file in csv_files: # Commit the changes and close the connection conn.commit() conn.close() + + +# Connect to the SQLite database +conn = sqlite3.connect("database.db") +cursor = conn.cursor() + +# Define the directory where the CSV files are located +csv_directory = "data" # Change this to your directory path + + +# Function to load data from a CSV file into a table +def load_csv_data(csv_path, table_name, cursor, starting_id): + # Delete existing data in the table + delete_query = f"DELETE FROM {table_name}" + cursor.execute(delete_query) + + with open(csv_path, newline="") as csvfile: + csv_reader = csv.reader(csvfile) + next(csv_reader) # Skip the header row if it exists in the CSV + for row in csv_reader: + # Exclude the first column (id) from the row + values = row[1:] + placeholders = ", ".join(["?"] * len(values)) + insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})" + cursor.execute(insert_query, [starting_id] + values) + starting_id += 1 + + +# Get a list of CSV files in the data directory +csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")] + +# Loop through the CSV files and load data into respective tables +for csv_file in csv_files: + table_name = os.path.splitext(csv_file)[ + 0 + ] # Remove the file extension to get the table name + csv_path = os.path.join(csv_directory, csv_file) + if table_name in starting_ids: + starting_id = starting_ids[table_name] + load_csv_data(csv_path, table_name, cursor, starting_id) + +# Commit the changes and close the connection +conn.commit() +conn.close() \ No newline at end of file diff --git a/Documents/crafting.md b/Documents/crafting.md index e68d67a2..c0aab749 100644 --- a/Documents/crafting.md +++ b/Documents/crafting.md @@ -10,6 +10,8 @@ ## Crafting +- 
recipe required to craft certain parts? Similar to FO4? Some recipes are only acquired by completing a certain task?
+
 ### Medicine
 
 - med crafting (lab)
diff --git a/Documents/progress.md b/Documents/progress.md
new file mode 100644
index 00000000..e69de29b
diff --git a/README.md b/README.md
index 3bf0791c..77d1e5a9 100644
--- a/README.md
+++ b/README.md
@@ -114,8 +114,9 @@ In the event that items despawn, the player can buy them back from an NPC for 3/
 #### Progression
 
+- ##### [Documentation](https://git.happytavern.co/OceanSlim/projectEli/src/branch/master/Documents/progress.md)
 - 1-10 prestiege system,
-  - once the last task is complete, players are given the option to restart, lose all progress and items and prestiege.
+  - once the last story task is complete, players are given the option to restart, lose all progress and items, and prestige.
 
 #### Crafting
 
 - ##### [Documentation](https://git.happytavern.co/OceanSlim/projectEli/src/branch/master/Documents/crafting.md)
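
All three update.py scripts open "database.db" and read "data/" through paths relative to the working directory, so each one has to be run from its own Database/<name>/ folder. A minimal sketch of a top-level runner in the spirit of the deleted main.py scripts -- the run_updates.py name and the hard-coded folder list are assumptions, not part of this patch:

# run_updates.py -- assumed helper, not included in this patch
import subprocess
import sys
from pathlib import Path

DATABASES = ["crafting", "items", "progress"]  # assumed folder list

for name in DATABASES:
    folder = Path("Database") / name
    # cwd is set per script so the relative "database.db" and "data/"
    # paths inside update.py resolve to that database's own files
    result = subprocess.run([sys.executable, "update.py"], cwd=folder)
    if result.returncode != 0:
        sys.exit(f"update.py failed for {name}")
    print(f"{name} database rebuilt.")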