WarpX
Variables
run_automated Namespace Reference

Variables

string machine = 'summit'
 
 parser = argparse.ArgumentParser( description='Run performance tests and write results in files' )
 
 dest
 
 action
 
 default
 
 help
 
 type
 
 choices
 
 args = parser.parse_args()
 
 n_node_list_string = args.n_node_list.split(',')
 
list n_node_list = [int(i) for i in n_node_list_string]
 
 start_date = args.start_date
 
string run_name = 'custom_perftest'
 
string perf_database_file = 'my_tests_database.h5'
 
bool rename_archive = False
 
bool store_full_input = False
 
bool update_perf_log_repo = False
 
bool push_on_perf_log_repo = False
 
 recompile = args.recompile
 
bool pull_3_repos = False
 
 compiler = args.compiler
 
 architecture = args.architecture
 
 source_dir_base = args.path_source
 
 res_dir_base = args.path_results
 
bool browse_output_files = False
 
bool browse_output_file = True
 
int n_repeat = 2
 
 test_list = get_test_list(n_repeat)
 
string warpx_dir = source_dir_base + '/warpx/'
 
string picsar_dir = source_dir_base + '/picsar/'
 
string amrex_dir = source_dir_base + '/amrex/'
 
string perf_logs_repo = source_dir_base + 'perf_logs/'
 
dictionary compiler_name = {'intel': 'intel', 'gnu': 'gcc', 'pgi':'pgi'}
 
dictionary module_Cname = {'cpu': 'haswell', 'knl': 'knl,quad,cache', 'gpu':''}
 
dictionary csv_file = {'cori':'cori_knl.csv', 'summit':'summit.csv'}
 
string cwd = warpx_dir + 'Tools/PerformanceTests/'
 
string path_hdf5 = cwd
 
string bin_dir = cwd + 'Bin/'
 
 bin_name = executable_name(compiler, architecture)
 
string log_dir = cwd
 
 day = time.strftime('%d')
 
 month = time.strftime('%m')
 
 year = time.strftime('%Y')
 
 config_command = get_config_command(compiler, architecture)
 
 git_repo = git.cmd.Git( picsar_dir )
 
string make_realclean_command = " make realclean WARPX_HOME=../.. " \
 
string make_command = "make -j 16 WARPX_HOME=../.. " \
 
 repo_path
 
 filename
 
 name
 
 res_dir = res_dir_base
 
list runtime_param_list = []
 
 test_list_n_node = copy.deepcopy(test_list)
 
 job_time_min = time_min(len(test_list))
 
 batch_string = get_batch_string(test_list_n_node, job_time_min, module_Cname[architecture], n_node)
 
string runtime_param_string = ' amr.n_cell=' + ' '.join(str(i) for i in current_run.n_cell)
 
 run_string = get_run_string(current_run, architecture, n_node, count, bin_name, runtime_param_string)
 
 submit_job_command = get_submit_job_command()
 
string output_filename = 'out_' + '_'.join([current_run.input_file, str(n_node), str(current_run.n_mpi_per_node), str(current_run.n_omp), str(count)]) + '.txt'
 
 df_newline = extract_dataframe(res_dir + output_filename, current_run.n_step)
 
 df_base = pd.read_hdf(path_hdf5 + perf_database_file, 'all_data')
 
 updated_df = df_base.append(df_newline, ignore_index=True)
 
 key
 
 mode
 
 format
 
 index = git_repo.index
 
int loc_counter = 0
 
 res_dir_arch = res_dir_base
 

Variable Documentation

◆ action

run_automated.action

◆ amrex_dir

string run_automated.amrex_dir = source_dir_base + '/amrex/'

◆ architecture

run_automated.architecture = args.architecture

◆ args

run_automated.args = parser.parse_args()

◆ batch_string

run_automated.batch_string = get_batch_string(test_list_n_node, job_time_min, module_Cname[architecture], n_node)

◆ bin_dir

string run_automated.bin_dir = cwd + 'Bin/'

◆ bin_name

run_automated.bin_name = executable_name(compiler, architecture)

◆ browse_output_file

bool run_automated.browse_output_file = True

◆ browse_output_files

bool run_automated.browse_output_files = False

◆ choices

run_automated.choices

◆ compiler

run_automated.compiler = args.compiler

◆ compiler_name

dictionary run_automated.compiler_name = {'intel': 'intel', 'gnu': 'gcc', 'pgi':'pgi'}

◆ config_command

run_automated.config_command = get_config_command(compiler, architecture)

◆ csv_file

dictionary run_automated.csv_file = {'cori':'cori_knl.csv', 'summit':'summit.csv'}

◆ cwd

string run_automated.cwd = warpx_dir + 'Tools/PerformanceTests/'

◆ day

run_automated.day = time.strftime('%d')

◆ default

run_automated.default

◆ dest

run_automated.dest

◆ df_base

run_automated.df_base = pd.read_hdf(path_hdf5 + perf_database_file, 'all_data')

◆ df_newline

run_automated.df_newline = extract_dataframe(res_dir + output_filename, current_run.n_step)

◆ filename

run_automated.filename

◆ format

run_automated.format

◆ git_repo

run_automated.git_repo = git.cmd.Git( picsar_dir )

◆ help

run_automated.help

◆ index

run_automated.index = git_repo.index

◆ job_time_min

run_automated.job_time_min = time_min(len(test_list))

◆ key

run_automated.key

◆ loc_counter

int run_automated.loc_counter = 0

◆ log_dir

string run_automated.log_dir = cwd

◆ machine

string run_automated.machine = 'summit'

◆ make_command

string run_automated.make_command = "make -j 16 WARPX_HOME=../.. " \

◆ make_realclean_command

string run_automated.make_realclean_command = " make realclean WARPX_HOME=../.. " \

◆ mode

run_automated.mode

◆ module_Cname

dictionary run_automated.module_Cname = {'cpu': 'haswell', 'knl': 'knl,quad,cache', 'gpu':''}

◆ month

run_automated.month = time.strftime('%m')

◆ n_node_list

list run_automated.n_node_list = [int(i) for i in n_node_list_string]

◆ n_node_list_string

run_automated.n_node_list_string = args.n_node_list.split(',')

◆ n_repeat

int run_automated.n_repeat = 2

◆ name

run_automated.name

◆ output_filename

string run_automated.output_filename = 'out_' + '_'.join([current_run.input_file, str(n_node), str(current_run.n_mpi_per_node), str(current_run.n_omp), str(count)]) + '.txt'

◆ parser

run_automated.parser = argparse.ArgumentParser( description='Run performance tests and write results in files' )

◆ path_hdf5

string run_automated.path_hdf5 = cwd

◆ perf_database_file

string run_automated.perf_database_file = 'my_tests_database.h5'

◆ perf_logs_repo

string run_automated.perf_logs_repo = source_dir_base + 'perf_logs/'

◆ picsar_dir

string run_automated.picsar_dir = source_dir_base + '/picsar/'

◆ pull_3_repos

bool run_automated.pull_3_repos = False

◆ push_on_perf_log_repo

bool run_automated.push_on_perf_log_repo = False

◆ recompile

run_automated.recompile = args.recompile

◆ rename_archive

bool run_automated.rename_archive = False

◆ repo_path

run_automated.repo_path

◆ res_dir

run_automated.res_dir = res_dir_base

◆ res_dir_arch

run_automated.res_dir_arch = res_dir_base

◆ res_dir_base

run_automated.res_dir_base = args.path_results

◆ run_name

string run_automated.run_name = 'custom_perftest'

◆ run_string

run_automated.run_string = get_run_string(current_run, architecture, n_node, count, bin_name, runtime_param_string)

◆ runtime_param_list

list run_automated.runtime_param_list = []

◆ runtime_param_string

string run_automated.runtime_param_string = ' amr.n_cell=' + ' '.join(str(i) for i in current_run.n_cell)

◆ source_dir_base

run_automated.source_dir_base = args.path_source

◆ start_date

run_automated.start_date = args.start_date

◆ store_full_input

bool run_automated.store_full_input = False

◆ submit_job_command

run_automated.submit_job_command = get_submit_job_command()

◆ test_list

run_automated.test_list = get_test_list(n_repeat)

◆ test_list_n_node

run_automated.test_list_n_node = copy.deepcopy(test_list)

◆ type

run_automated.type

◆ update_perf_log_repo

bool run_automated.update_perf_log_repo = False

◆ updated_df

run_automated.updated_df = df_base.append(df_newline, ignore_index=True)

◆ warpx_dir

string run_automated.warpx_dir = source_dir_base + '/warpx/'

◆ year

run_automated.year = time.strftime('%Y')