change to simple_salesforce for data load

Rene Kaßeböhmer
2025-04-08 16:02:58 +02:00
parent 5eb314fbf7
commit 2e6d82d9cc
5 changed files with 188 additions and 11 deletions
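
The file view below only covers the CSV preparation script; the switch to simple_salesforce named in the commit title happens in one of the other changed files, which is not shown here. As a rough, hypothetical sketch of what such a data load can look like with simple_salesforce (the credentials, object name, and external ID field are placeholders, not taken from this commit):

from simple_salesforce import Salesforce
import pandas as pd

# Placeholder credentials; a real script would read these from config or env vars.
sf = Salesforce(username='user@example.com', password='secret', security_token='token')

# Load a prepared CSV the same way the script below writes it, then
# upsert the records through the Bulk API.
records = pd.read_csv('../7_upsert_assets/Asset.csv',
                      keep_default_na=False, dtype=str).to_dict('records')
sf.bulk.Asset.upsert(records, 'Id', batch_size=10000)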


@@ -5,8 +5,10 @@ country_mapping = {
}
# Read the input CSV file, assuming the first row is the header
-read_df = pd.read_csv('../1_extract_data/SCInstalledBaseLocation__c.csv', header=0, keep_default_na=False, dtype=str)
-read_df_ib = pd.read_csv('../1_extract_data/SCInstalledBase__c.csv', header=0, keep_default_na=False, dtype=str)
+read_df = pd.read_csv('../1_extract_data/target/SCInstalledBaseLocation__c_upsert_target.csv', header=0, keep_default_na=False, dtype=str)
+read_df_ib = pd.read_csv('../1_extract_data/target/object-set-2/SCInstalledBase__c_upsert_target.csv', header=0, keep_default_na=False, dtype=str)
+read_df_product2 = pd.read_csv('../1_extract_data/target/object-set-6/Product2_upsert_target.csv', header=0, keep_default_na=False, dtype=str)
for row in read_df.to_dict('records'):
try:
# Your processing logic here
@@ -18,10 +20,13 @@ for row in read_df.to_dict('records'):
reindex_columns = ['City__c','Country__c','Extension__c','FlatNo__c','Floor__c','GeoX__c','GeoY__c','HouseNo__c','Id','PostalCode__c','Street__c']
# ArticleNo__c,CommissioningDate__c,Id,InstallationDate__c,InstalledBaseLocation__c,InstalledBaseLocation__r.Id,Name,ProductEnergy__c,ProductUnitClass__c,ProductUnitType__c,SerialNo__c,SerialNoException__c
reindex_columns_ib = ['ArticleNo__c','CommissioningDate__c','Id','InstallationDate__c','InstalledBaseLocation__c','InstalledBaseLocation__r.Id','Name','ProductEnergy__c','ProductUnitClass__c','ProductUnitType__c','SerialNo__c','SerialNoException__c']
+# EAN_Product_Code__c,Family,Id,Main_Product_Group__c,MaterialType__c,Name,Product_Code__c,ProductCode
+reindex_columns_product2 = ['EAN_Product_Code__c','Family','Id','Main_Product_Group__c','MaterialType__c','Name','Product_Code__c','ProductCode']
# Reindex the columns to match the desired format
df = read_df.reindex(reindex_columns, axis=1)
df_ib = read_df_ib.reindex(reindex_columns_ib, axis=1)
+df_product2 = read_df_product2.reindex(reindex_columns_product2, axis=1)
df['Street'] = (
df['Street__c'].astype(str) + ' ' +
@@ -151,7 +156,20 @@ merged_df_ib = merged_df_ib.drop('InstalledBaseLocation__c', axis=1)
merged_df_ib = merged_df_ib.drop('InstalledBaseLocation__r.Id', axis=1)
merged_df_ib = merged_df_ib.drop('Id_y', axis=1)
print(merged_df_ib.columns)
-merged_df_ib.columns = ['Product2.EAN_Product_Code__c', 'FSL_1st_Ignition_Date__c', 'Id', 'InstallDate', 'Name', 'Kind_of_Energy__c', 'Kind_of_Installation__c', 'Main_Product_Group__c', 'SerialNumber', 'Serialnumber_Exception__c', 'Location.ExternalReference']
+merged_df_ib.columns = ['Product2.Product_Code__c', 'FSL_1st_Ignition_Date__c', 'Id', 'InstallDate', 'Name', 'Kind_of_Energy__c', 'Kind_of_Installation__c', 'Main_Product_Group__c', 'SerialNumber', 'Serialnumber_Exception__c', 'Location.ExternalReference']
+merged_df_ib = merged_df_ib.drop('Main_Product_Group__c', axis=1)
+# assign Main_Product_Group__c based on product2 records
+merged_df_ib = pd.merge(merged_df_ib,
+                        df_product2[['Product_Code__c', 'Main_Product_Group__c']],
+                        left_on='Product2.Product_Code__c',
+                        right_on='Product_Code__c',
+                        how='left')
+merged_df_ib = merged_df_ib.drop('Product_Code__c', axis=1)
+merged_df_ib = merged_df_ib.drop_duplicates(subset=['Id'], keep='first')
# Write each DataFrame to a separate CSV file
address_df.to_csv('../3_upsert_address_and_parent_location/Address.csv', index=False)
@@ -159,4 +177,6 @@ parent_df.to_csv('../3_upsert_address_and_parent_location/Location.csv', index=False)
child_df.to_csv('../5_upsert_child_location/Location.csv', index=False)
+merged_df_ib.to_csv('../7_upsert_assets/Asset.csv', index=False)
## end mapping
print('Data has been successfully split into Address.csv, Parent_Location.csv, and Child_Location.csv files with duplicate checks applied.')
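
For reference, the Main_Product_Group__c enrichment added above is a plain pandas left-merge lookup followed by de-duplication. A self-contained sketch with invented toy data:

import pandas as pd

assets = pd.DataFrame({'Id': ['a1', 'a2'],
                       'Product2.Product_Code__c': ['P100', 'P200']})
products = pd.DataFrame({'Product_Code__c': ['P100', 'P200'],
                         'Main_Product_Group__c': ['Boiler', 'Heat pump']})

# The left merge keeps every asset row and pulls the group in from the product table.
enriched = pd.merge(assets,
                    products[['Product_Code__c', 'Main_Product_Group__c']],
                    left_on='Product2.Product_Code__c',
                    right_on='Product_Code__c',
                    how='left')
enriched = enriched.drop('Product_Code__c', axis=1)
# A merge fans out rows when a product code appears more than once on the
# right-hand side, hence the drop_duplicates on Id in the script above.
enriched = enriched.drop_duplicates(subset=['Id'], keep='first')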