added progress db, updated database update logic for all dbs

This commit is contained in:
0ceanSlim 2023-10-12 08:35:36 -04:00
parent c64922e5f3
commit a83520be2d
16 changed files with 343 additions and 159 deletions

View File

@ -1,10 +0,0 @@
# starting_ids.py
# Base id for each inventory table; rows imported into a table are
# numbered sequentially upward from that table's entry here.
starting_ids = dict(
    ammo=1000,
    food=2000,
    gear=3000,
    material=4000,
    weapon=5000,
    scrap=9000,
)

View File

@ -1,47 +0,0 @@
import csv
import os
import sqlite3
from id import starting_ids
# importData.py — refill the tables of database.db from the CSV files in
# ``csv_directory``, assigning each table's row ids from starting_ids.
# Connect to the SQLite database
conn = sqlite3.connect("database.db")
cursor = conn.cursor()
# Define the directory where the CSV files are located
csv_directory = "data"  # Change this to your directory path
def load_csv_data(csv_path, table_name, cursor, starting_id):
    """Replace the contents of *table_name* with the rows of *csv_path*.

    The CSV's own first (id) column is discarded and replaced by
    sequential ids beginning at *starting_id*; the header row is skipped.
    All work happens on *cursor* — committing is left to the caller.
    """
    cursor.execute(f"DELETE FROM {table_name}")  # wipe existing rows first
    with open(csv_path, newline="") as handle:
        reader = csv.reader(handle)
        next(reader)  # header row
        for offset, record in enumerate(reader):
            payload = record[1:]  # drop the file's own id column
            slots = ", ".join("?" for _ in payload)
            cursor.execute(
                f"INSERT INTO {table_name} VALUES (?, {slots})",
                [starting_id + offset] + payload,
            )
# Get a list of CSV files in the data directory
csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")]
# Loop through the CSV files and load data into respective tables
for csv_file in csv_files:
    table_name = os.path.splitext(csv_file)[
        0
    ]  # Remove the file extension to get the table name
    csv_path = os.path.join(csv_directory, csv_file)
    # Only tables with a registered base id are reloaded; any other CSV in
    # the directory is silently skipped.
    if table_name in starting_ids:
        starting_id = starting_ids[table_name]
        load_csv_data(csv_path, table_name, cursor, starting_id)
# Commit the changes and close the connection
conn.commit()
conn.close()

View File

@ -1,21 +0,0 @@
import subprocess
import sys

# Define the paths to your Python scripts
create_tables_script = "createTables.py"
import_data_script = "importData.py"

# Run both scripts in dependency order: tables must exist before the data
# import can populate them. On the first failure, report it and stop with a
# non-zero exit code so callers can detect the broken rebuild.
for script in (create_tables_script, import_data_script):
    try:
        # sys.executable reuses the interpreter running this file rather than
        # whatever "python" resolves to on PATH (which may be absent or a
        # different version on some systems).
        subprocess.run([sys.executable, script], check=True)
        print(f"{script} script executed successfully.")
    except subprocess.CalledProcessError:
        print(f"Error running {script} script.")
        exit(1)

View File

@ -2,6 +2,17 @@ import sqlite3
import os
import csv
# starting_ids.py
# Base id for each table in this database; imported rows are numbered
# sequentially upward from the table's entry here.
starting_ids = dict(
    ammo=1000,
    food=2000,
    gear=3000,
    material=4000,
    weapon=5000,
    scrap=9000,
)
def get_data_type(value):
try:
@ -76,3 +87,47 @@ for csv_file in csv_files:
# Commit the changes and close the connection
conn.commit()
conn.close()
# Second pass: the section above inserted the CSV rows verbatim; this pass
# deletes them and re-inserts each registered table's rows with sequential
# ids taken from starting_ids.
# Connect to the SQLite database
conn = sqlite3.connect("database.db")
cursor = conn.cursor()
# Define the directory where the CSV files are located
csv_directory = "data"  # Change this to your directory path
# Function to load data from a CSV file into a table
def load_csv_data(csv_path, table_name, cursor, starting_id):
    """Replace all rows of ``table_name`` with the rows of ``csv_path``.

    The CSV's own first (id) column is dropped; rows receive sequential
    ids starting at ``starting_id``. The header row is skipped and the
    caller is responsible for committing.
    """
    # Delete existing data in the table
    delete_query = f"DELETE FROM {table_name}"
    cursor.execute(delete_query)
    with open(csv_path, newline="") as csvfile:
        csv_reader = csv.reader(csvfile)
        next(csv_reader)  # Skip the header row if it exists in the CSV
        for row in csv_reader:
            # Exclude the first column (id) from the row
            values = row[1:]
            placeholders = ", ".join(["?"] * len(values))
            insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})"
            cursor.execute(insert_query, [starting_id] + values)
            starting_id += 1
# Get a list of CSV files in the data directory
csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")]
# Loop through the CSV files and load data into respective tables
for csv_file in csv_files:
    table_name = os.path.splitext(csv_file)[
        0
    ]  # Remove the file extension to get the table name
    csv_path = os.path.join(csv_directory, csv_file)
    # Only tables with an entry in starting_ids are renumbered; other CSVs
    # keep the rows inserted by the first pass.
    if table_name in starting_ids:
        starting_id = starting_ids[table_name]
        load_csv_data(csv_path, table_name, cursor, starting_id)
# Commit the changes and close the connection
conn.commit()
conn.close()

View File

@ -1,12 +0,0 @@
# starting_ids.py
# items database
# Base id assigned to the first imported row of each item table.
starting_ids = {
    # NOTE(review): "reload" and "craft" share base id 20000, so their id
    # ranges will collide if both tables are imported — confirm intended
    # values before relying on ids being unique across tables.
    "reload": 20000,
    "ammo": 10000,
    "craft": 20000,
    "gear": 30000,
    "part": 40000,
    "med": 50000,
    "food": 8000,
}

View File

@ -1,47 +0,0 @@
import csv
import os
import sqlite3
from id import starting_ids
# importData.py — refill the item tables of database.db from the CSV files
# in ``csv_directory``, assigning per-table sequential ids from starting_ids.
# Connect to the SQLite database
conn = sqlite3.connect("database.db")
cursor = conn.cursor()
# Define the directory where the CSV files are located
csv_directory = "data"  # Change this to your directory path
# Function to load data from a CSV file into a table
def load_csv_data(csv_path, table_name, cursor, starting_id):
    """Replace all rows of ``table_name`` with the rows of ``csv_path``.

    The CSV's own first (id) column is dropped; rows receive sequential
    ids starting at ``starting_id``. The header row is skipped and the
    caller is responsible for committing.
    """
    # Delete existing data in the table
    delete_query = f"DELETE FROM {table_name}"
    cursor.execute(delete_query)
    with open(csv_path, newline="") as csvfile:
        csv_reader = csv.reader(csvfile)
        next(csv_reader)  # Skip the header row if it exists in the CSV
        for row in csv_reader:
            # Exclude the first column (id) from the row
            values = row[1:]
            placeholders = ", ".join(["?"] * len(values))
            insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})"
            cursor.execute(insert_query, [starting_id] + values)
            starting_id += 1
# Get a list of CSV files in the data directory
csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")]
# Loop through the CSV files and load data into respective tables
for csv_file in csv_files:
    table_name = os.path.splitext(csv_file)[
        0
    ]  # Remove the file extension to get the table name
    csv_path = os.path.join(csv_directory, csv_file)
    # Only tables with a registered base id are reloaded; other CSVs are
    # silently skipped.
    if table_name in starting_ids:
        starting_id = starting_ids[table_name]
        load_csv_data(csv_path, table_name, cursor, starting_id)
# Commit the changes and close the connection
conn.commit()
conn.close()

View File

@ -1,21 +0,0 @@
import subprocess
import sys

# Define the paths to your Python scripts
create_tables_script = "createTables.py"
import_data_script = "importData.py"

# Run both scripts in dependency order: tables must exist before the data
# import can populate them. Stop with a non-zero exit code on the first
# failure so callers can detect the broken rebuild.
for script in (create_tables_script, import_data_script):
    try:
        # sys.executable reuses the interpreter running this file rather than
        # whatever "python" resolves to on PATH (which may be absent or a
        # different version on some systems).
        subprocess.run([sys.executable, script], check=True)
        print(f"{script} script executed successfully.")
    except subprocess.CalledProcessError:
        print(f"Error running {script} script.")
        exit(1)

134
Database/items/update.py Normal file
View File

@ -0,0 +1,134 @@
import sqlite3
import os
import csv
# starting_ids.py
# items database
# Base id assigned to the first imported row of each item table.
starting_ids = {
    # NOTE(review): "reload" and "craft" share base id 20000, so their id
    # ranges will collide if both tables are imported — confirm intended
    # values before relying on ids being unique across tables.
    "reload": 20000,
    "ammo": 10000,
    "craft": 20000,
    "gear": 30000,
    "part": 40000,
    "med": 50000,
    "food": 8000,
}
def get_data_type(value):
    """Classify a CSV cell string as an SQLite column type.

    Returns "INTEGER" if *value* parses as an int, "REAL" if it parses
    as a float, and "TEXT" otherwise.
    """
    for caster, affinity in ((int, "INTEGER"), (float, "REAL")):
        try:
            caster(value)
        except ValueError:
            continue
        return affinity
    return "TEXT"
# First pass: drop every user table, then recreate one table per CSV file
# (columns/types inferred from the header and first data row) and insert the
# CSV rows verbatim.
# Connect to the SQLite database and delete existing tables
conn = sqlite3.connect("database.db")
cursor = conn.cursor()
# Get a list of CSV files in the "data" directory
data_dir = "data"  # Change this to your data directory path
csv_files = [f for f in os.listdir(data_dir) if f.endswith(".csv")]
# Drop all existing tables except for sqlite_sequence
cursor.execute("PRAGMA foreign_keys = OFF;")
cursor.execute(
    "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_sequence';"
)
existing_tables = cursor.fetchall()
for table in existing_tables:
    cursor.execute(f"DROP TABLE IF EXISTS {table[0]};")
# Commit the changes to delete existing tables
conn.commit()
# Iterate through CSV files and create new tables
for csv_file in csv_files:
    table_name = os.path.splitext(csv_file)[0]
    # Read the first row of the CSV file to determine the column names
    with open(os.path.join(data_dir, csv_file), newline="") as csvfile:
        csv_reader = csv.reader(csvfile)
        header = next(csv_reader)
    # Read the second row to determine data types
    with open(os.path.join(data_dir, csv_file), newline="") as csvfile:
        csv_reader = csv.reader(csvfile)
        next(csv_reader)  # Skip the header row
        # NOTE(review): a CSV containing only a header row makes this next()
        # raise StopIteration — confirm every data CSV has at least one row.
        data_row = next(csv_reader)
        data_types = [get_data_type(value) for value in data_row]
    # Add a primary key column if needed (replace 'unique_id' with your unique identifier column name)
    if "unique_id" in header:
        header[header.index("unique_id")] += " PRIMARY KEY"
    # Generate the CREATE TABLE statement dynamically based on the column names and data types
    create_table_sql = f"CREATE TABLE IF NOT EXISTS {table_name} (\n"
    for column_name, data_type in zip(header, data_types):
        create_table_sql += f" {column_name} {data_type},\n"
    # Strip the trailing comma/newline before closing the column list
    create_table_sql = create_table_sql.rstrip(",\n") + "\n);"
    # Execute the CREATE TABLE statement
    cursor.execute(create_table_sql)
    # Read and insert data from the CSV file into the table
    with open(os.path.join(data_dir, csv_file), newline="") as csvfile:
        csv_reader = csv.reader(csvfile)
        next(csv_reader)  # Skip the header row
        for row in csv_reader:
            placeholders = ",".join(["?"] * len(row))
            insert_sql = f"INSERT INTO {table_name} VALUES ({placeholders});"
            cursor.execute(insert_sql, row)
# Commit the changes and close the connection
conn.commit()
conn.close()
# Second pass: the section above inserted the CSV rows verbatim; this pass
# deletes them and re-inserts each registered table's rows with sequential
# ids taken from starting_ids.
# Connect to the SQLite database
conn = sqlite3.connect("database.db")
cursor = conn.cursor()
# Define the directory where the CSV files are located
csv_directory = "data"  # Change this to your directory path
# Function to load data from a CSV file into a table
def load_csv_data(csv_path, table_name, cursor, starting_id):
    """Replace all rows of ``table_name`` with the rows of ``csv_path``.

    The CSV's own first (id) column is dropped; rows receive sequential
    ids starting at ``starting_id``. The header row is skipped and the
    caller is responsible for committing.
    """
    # Delete existing data in the table
    delete_query = f"DELETE FROM {table_name}"
    cursor.execute(delete_query)
    with open(csv_path, newline="") as csvfile:
        csv_reader = csv.reader(csvfile)
        next(csv_reader)  # Skip the header row if it exists in the CSV
        for row in csv_reader:
            # Exclude the first column (id) from the row
            values = row[1:]
            placeholders = ", ".join(["?"] * len(values))
            insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})"
            cursor.execute(insert_query, [starting_id] + values)
            starting_id += 1
# Get a list of CSV files in the data directory
csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")]
# Loop through the CSV files and load data into respective tables
for csv_file in csv_files:
    table_name = os.path.splitext(csv_file)[
        0
    ]  # Remove the file extension to get the table name
    csv_path = os.path.join(csv_directory, csv_file)
    # Only tables with an entry in starting_ids are renumbered; other CSVs
    # keep the rows inserted by the first pass.
    if table_name in starting_ids:
        starting_id = starting_ids[table_name]
        load_csv_data(csv_path, table_name, cursor, starting_id)
# Commit the changes and close the connection
conn.commit()
conn.close()

View File

@ -0,0 +1,101 @@
id,lv,xp
NULL,1,84
NULL,2,174
NULL,3,276
NULL,4,388
NULL,5,512
NULL,6,650
NULL,7,801
NULL,8,969
NULL,9,1154
NULL,10,1358
NULL,11,1584
NULL,12,1833
NULL,13,2107
NULL,14,2411
NULL,15,2746
NULL,16,3115
NULL,17,3523
NULL,18,3973
NULL,19,4470
NULL,20,5018
NULL,21,5624
NULL,22,6291
NULL,23,7028
NULL,24,7842
NULL,25,8740
NULL,26,9730
NULL,27,10824
NULL,28,12031
NULL,29,13363
NULL,30,14833
NULL,31,16456
NULL,32,18247
NULL,33,20224
NULL,34,22406
NULL,35,24815
NULL,36,27473
NULL,37,30408
NULL,38,33648
NULL,39,37224
NULL,40,41171
NULL,41,45529
NULL,42,50339
NULL,43,55649
NULL,44,61512
NULL,45,67983
NULL,46,75127
NULL,47,83014
NULL,48,91721
NULL,49,101333
NULL,50,111945
NULL,51,123660
NULL,52,136594
NULL,53,150872
NULL,54,166636
NULL,55,184040
NULL,56,203254
NULL,57,224466
NULL,58,247886
NULL,59,273742
NULL,60,302288
NULL,61,333804
NULL,62,368599
NULL,63,407015
NULL,64,449428
NULL,65,496254
NULL,66,547953
NULL,67,605032
NULL,68,668051
NULL,69,737627
NULL,70,814445
NULL,71,899257
NULL,72,992895
NULL,73,1096278
NULL,74,1210421
NULL,75,1336443
NULL,76,1475581
NULL,77,1629200
NULL,78,1798808
NULL,79,1986068
NULL,80,2192818
NULL,81,2421087
NULL,82,2673114
NULL,83,2951373
NULL,84,3258594
NULL,85,3597792
NULL,86,3972294
NULL,87,4385776
NULL,88,4842295
NULL,89,5346332
NULL,90,5902831
NULL,91,6517253
NULL,92,7195629
NULL,93,7944614
NULL,94,8771558
NULL,95,9684577
NULL,96,10692629
NULL,97,11805606
NULL,98,13034431
NULL,99,14500000
NULL,100,16500000
1 id lv xp
2 NULL 1 84
3 NULL 2 174
4 NULL 3 276
5 NULL 4 388
6 NULL 5 512
7 NULL 6 650
8 NULL 7 801
9 NULL 8 969
10 NULL 9 1154
11 NULL 10 1358
12 NULL 11 1584
13 NULL 12 1833
14 NULL 13 2107
15 NULL 14 2411
16 NULL 15 2746
17 NULL 16 3115
18 NULL 17 3523
19 NULL 18 3973
20 NULL 19 4470
21 NULL 20 5018
22 NULL 21 5624
23 NULL 22 6291
24 NULL 23 7028
25 NULL 24 7842
26 NULL 25 8740
27 NULL 26 9730
28 NULL 27 10824
29 NULL 28 12031
30 NULL 29 13363
31 NULL 30 14833
32 NULL 31 16456
33 NULL 32 18247
34 NULL 33 20224
35 NULL 34 22406
36 NULL 35 24815
37 NULL 36 27473
38 NULL 37 30408
39 NULL 38 33648
40 NULL 39 37224
41 NULL 40 41171
42 NULL 41 45529
43 NULL 42 50339
44 NULL 43 55649
45 NULL 44 61512
46 NULL 45 67983
47 NULL 46 75127
48 NULL 47 83014
49 NULL 48 91721
50 NULL 49 101333
51 NULL 50 111945
52 NULL 51 123660
53 NULL 52 136594
54 NULL 53 150872
55 NULL 54 166636
56 NULL 55 184040
57 NULL 56 203254
58 NULL 57 224466
59 NULL 58 247886
60 NULL 59 273742
61 NULL 60 302288
62 NULL 61 333804
63 NULL 62 368599
64 NULL 63 407015
65 NULL 64 449428
66 NULL 65 496254
67 NULL 66 547953
68 NULL 67 605032
69 NULL 68 668051
70 NULL 69 737627
71 NULL 70 814445
72 NULL 71 899257
73 NULL 72 992895
74 NULL 73 1096278
75 NULL 74 1210421
76 NULL 75 1336443
77 NULL 76 1475581
78 NULL 77 1629200
79 NULL 78 1798808
80 NULL 79 1986068
81 NULL 80 2192818
82 NULL 81 2421087
83 NULL 82 2673114
84 NULL 83 2951373
85 NULL 84 3258594
86 NULL 85 3597792
87 NULL 86 3972294
88 NULL 87 4385776
89 NULL 88 4842295
90 NULL 89 5346332
91 NULL 90 5902831
92 NULL 91 6517253
93 NULL 92 7195629
94 NULL 93 7944614
95 NULL 94 8771558
96 NULL 95 9684577
97 NULL 96 10692629
98 NULL 97 11805606
99 NULL 98 13034431
100 NULL 99 14500000
101 NULL 100 16500000

Binary file not shown.

View File

@ -2,6 +2,11 @@ import sqlite3
import os
import csv
# starting_ids.py
# Only the "exp" table gets renumbered ids; its numbering starts at 1.
starting_ids = {"exp": 1}
def get_data_type(value):
try:
@ -76,3 +81,47 @@ for csv_file in csv_files:
# Commit the changes and close the connection
conn.commit()
conn.close()
# Second pass: the section above inserted the CSV rows verbatim; this pass
# deletes them and re-inserts each registered table's rows with sequential
# ids taken from starting_ids.
# Connect to the SQLite database
conn = sqlite3.connect("database.db")
cursor = conn.cursor()
# Define the directory where the CSV files are located
csv_directory = "data"  # Change this to your directory path
# Function to load data from a CSV file into a table
def load_csv_data(csv_path, table_name, cursor, starting_id):
    """Replace all rows of ``table_name`` with the rows of ``csv_path``.

    The CSV's own first (id) column is dropped; rows receive sequential
    ids starting at ``starting_id``. The header row is skipped and the
    caller is responsible for committing.
    """
    # Delete existing data in the table
    delete_query = f"DELETE FROM {table_name}"
    cursor.execute(delete_query)
    with open(csv_path, newline="") as csvfile:
        csv_reader = csv.reader(csvfile)
        next(csv_reader)  # Skip the header row if it exists in the CSV
        for row in csv_reader:
            # Exclude the first column (id) from the row
            values = row[1:]
            placeholders = ", ".join(["?"] * len(values))
            insert_query = f"INSERT INTO {table_name} VALUES (?, {placeholders})"
            cursor.execute(insert_query, [starting_id] + values)
            starting_id += 1
# Get a list of CSV files in the data directory
csv_files = [f for f in os.listdir(csv_directory) if f.endswith(".csv")]
# Loop through the CSV files and load data into respective tables
for csv_file in csv_files:
    table_name = os.path.splitext(csv_file)[
        0
    ]  # Remove the file extension to get the table name
    csv_path = os.path.join(csv_directory, csv_file)
    # Only tables with an entry in starting_ids are renumbered; other CSVs
    # keep the rows inserted by the first pass.
    if table_name in starting_ids:
        starting_id = starting_ids[table_name]
        load_csv_data(csv_path, table_name, cursor, starting_id)
# Commit the changes and close the connection
conn.commit()
conn.close()

View File

@ -10,6 +10,8 @@
## Crafting
- recipe required to craft certain parts? similar to FO4? Some recipes are only acquired with a certain task completion?
### Medicine
- med crafting (lab)

0
Documents/progress.md Normal file
View File

View File

@ -114,8 +114,9 @@ In the event that items despawn, the player can buy them back from an NPC for 3/
#### Progression
- ##### [Documentation](https://git.happytavern.co/OceanSlim/projectEli/src/branch/master/Documents/progress.md)
- 1-10 prestige system,
- once the last task is complete, players are given the option to restart, lose all progress and items and prestige.
- once the last story task is complete, players are given the option to restart, lose all progress and items and prestige.
#### Crafting
- ##### [Documentation](https://git.happytavern.co/OceanSlim/projectEli/src/branch/master/Documents/crafting.md)