# EECE571F-project / ml4co-competition / common / evaluate.py
import argparse
import csv
import json
import pathlib
import random
import sys

import ecole
import numpy as np
import torch

def evaluate(env, instance, initial_primal_bound, policy):
    """Run one episode of `policy` on `instance` in `env` and return the final
    solver info dict together with the cumulated reward."""
    # reset the environment; the initial primal bound is used as an objective limit
    observation, action_set, reward, done, info = env.reset(instance, objective_limit=initial_primal_bound)

    cumulated_reward = 0  # discard the initial reward returned by reset

    # loop over the environment
    while not done:
        action, _ = policy(action_set, observation)
        observation, action_set, reward, done, info = env.step(action.item())
        cumulated_reward += reward

    return info, cumulated_reward
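
# Sketch (not part of the original agents): any object with a ``seed(seed)``
# method and a ``__call__(action_set, observation) -> (action, extra)`` whose
# action supports ``.item()`` plugs into ``evaluate`` above. A hypothetical
# random-branching policy for sanity checks might look like:
#
# class RandomPolicy:
#     def seed(self, seed):
#         self.rng = np.random.default_rng(seed)
#
#     def __call__(self, action_set, observation):
#         # pick a branching candidate uniformly at random from the action set
#         return self.rng.choice(action_set), None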


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        help='Problem benchmark to process.',
        choices=['item_placement', 'capacitated_facility_location', 'set_cover'],
    )
    parser.add_argument(
        '-t', '--timelimit',
        help='Episode time limit (in seconds).',
        default=argparse.SUPPRESS,
        type=float,
    )

    args = parser.parse_args()

    # NOTE: hard-coded overrides of the parsed arguments for this experiment
    args.task = 'dual'
    args.problem = 'set_cover'

    seed = 6  # seeds used across evaluation runs: 4, 5, 6, 7, 8
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    # print the installed Ecole version
    print(ecole.__version__)

    print(f"Evaluating the {args.task} task agent on the {args.problem} problem.")

    # collect the instance files
    if args.problem == 'item_placement':
        instances_path = pathlib.Path("instances/1_item_placement/test/")
        instances = list(instances_path.glob('*.mps.gz'))

    elif args.problem == 'capacitated_facility_location':
        instances = ecole.instance.CapacitatedFacilityLocationGenerator(n_customers=200, n_facilities=50)
        instances.seed(seed)

    elif args.problem == 'set_cover':
        instances = ecole.instance.SetCoverGenerator(n_rows=700, n_cols=500, density=0.05)
        instances.seed(seed)
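
    # Note: Ecole instance generators are infinite iterators of ecole.scip.Model
    # objects, so the evaluation loop below relies on the explicit ``p == 9``
    # break to stop after 10 instances. A quick sanity check might look like
    # (sketch, not part of the original script):
    #
    # model = next(instances)  # a fresh, unsolved ecole.scip.Model
    # print(model.name, model.primal_bound)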
    
    rl_primal = pathlib.Path(f"submissions/{args.task}/primal/hard_{args.problem}_seed_{seed}.csv")
    rl_nodes = pathlib.Path(f"submissions/{args.task}/nodes/hard_{args.problem}_seed_{seed}.csv")
    
    # rl_file = pathlib.Path(f"submissions/{args.task}/rl/hard_{args.problem}_seed_{seed}.csv")
    # gcn_file = pathlib.Path(f"submissions/{args.task}/gcn/hard_{args.problem}_seed_{seed}.csv")
    # expert_file = pathlib.Path(f"submissions/{args.task}/expert/hard_{args.problem}_seed_{seed}.csv")
    # strong_branch_file = pathlib.Path(f"submissions/{args.task}/sb/medium_{args.problem}_seed_{seed}.csv")

    results_fieldnames = ['instance', 'instance_number', 'initial_primal_bound', 'initial_dual_bound', 'objective_offset', 'cumulated_reward', 'time', 'number_of_nodes', 'primal_bound', 'dual_bound', 'status']

    rl_primal.parent.mkdir(parents=True, exist_ok=True)
    with open(rl_primal, mode='w') as csv_file:
        primal_writer = csv.DictWriter(csv_file, fieldnames=results_fieldnames)
        primal_writer.writeheader()

    rl_nodes.parent.mkdir(parents=True, exist_ok=True)
    with open(rl_nodes, mode='w') as csv_file:
        node_writer = csv.DictWriter(csv_file, fieldnames=results_fieldnames)
        node_writer.writeheader()
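
    # Sketch (assumption, not part of the original pipeline): the result CSVs
    # can be aggregated after the run with csv.DictReader, e.g. to compare the
    # mean cumulated reward of the two agents; ``mean_reward`` is a
    # hypothetical helper:
    #
    # def mean_reward(path):
    #     with open(path) as f:
    #         rows = list(csv.DictReader(f))
    #     return sum(float(r['cumulated_reward']) for r in rows) / len(rows)
    #
    # print(mean_reward(rl_primal), mean_reward(rl_nodes))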

    # expert_file.parent.mkdir(parents=True, exist_ok=True)
    # with open(expert_file, mode='w') as csv_file:
    #     expert_writer = csv.DictWriter(csv_file, fieldnames=results_fieldnames)
    #     expert_writer.writeheader()

    # strong_branch_file.parent.mkdir(parents=True, exist_ok=True)
    # with open(strong_branch_file, mode='w') as csv_file:
    #     strong_branch_writer = csv.DictWriter(csv_file, fieldnames=results_fieldnames)
    #     strong_branch_writer.writeheader()
    

    # make the repository root importable for the submissions/ and common/ packages
    sys.path.insert(1, str(pathlib.Path.cwd()))

    if args.task == "dual":
        from submissions.dual.agents.rl import Policy as RLPolicy
        from submissions.dual.agents.baseline import Policy as GCNPolicy
        from submissions.dual.agents.baseline import ObservationFunction 
        from common.environments import Branching as Environment
        from common.rewards import TimeLimitPrimalIntegral as BoundIntegral
        from common.environments import Configuring, StrongBrancher

        time_limit = 5 * 60  # 5 minutes per episode
        memory_limit = 8796093022207  # maximum

    # override the episode time limit from the command line if provided
    time_limit = getattr(args, "timelimit", time_limit)

    observation_function = ObservationFunction(problem=args.problem)
    # rl_policy = RLPolicy(problem=args.problem)
    # gcn_policy = GCNPolicy(problem=args.problem)

    primal_policy = RLPolicy(problem=args.problem, reward='primal')
    nodes_policy = RLPolicy(problem=args.problem, reward='nodes')

    # evaluation loop
    for p, instance in enumerate(instances):

        integral_function = BoundIntegral()

        primal_env = Environment(
            time_limit=time_limit,
            observation_function=observation_function,
            reward_function=-integral_function,  # negated integral (minimization)
            scip_params={'limits/memory': memory_limit},
        )

        nodes_env = Environment(
            time_limit=time_limit,
            observation_function=observation_function,
            reward_function=-ecole.reward.NNodes(),
            scip_params={'limits/memory': memory_limit},
        )
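
        # Ecole reward functions support arithmetic composition, so the
        # negations above (``-integral_function``, ``-ecole.reward.NNodes()``)
        # turn quantities to be minimized into rewards to be maximized.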
        # nodes_env = Configuring(
        #     time_limit=time_limit,
        #     observation_function=None,
        #     reward_function=-ecole.reward.NNodes(),
        #     scip_params={'limits/memory': memory_limit},
        # )

        # vanillafullstrong_env = StrongBrancher(
        #     time_limit=time_limit,
        #     observation_function=None,
        #     reward_function=-integral_function,
        #     scip_params={'limits/memory': memory_limit},
        # )

        # seed the observation function, agents, and environments for deterministic behavior
        observation_function.seed(seed)
        primal_policy.seed(seed)
        nodes_policy.seed(seed)
        primal_env.seed(seed)
        nodes_env.seed(seed)

        # vanillafullstrong_env.seed(seed)

        # set up the reward function parameters for that instance
        if args.problem == 'item_placement':
            with open(instance.with_name(instance.stem).with_suffix('.json')) as f:
                instance_info = json.load(f)
            initial_primal_bound = instance_info["primal_bound"]
            initial_dual_bound = instance_info["dual_bound"]
        else:
            initial_primal_bound = instance.primal_bound
            initial_dual_bound = instance.dual_bound
        
        objective_offset = 0
        integral_function.set_parameters(
                initial_primal_bound=initial_primal_bound,
                initial_dual_bound=initial_dual_bound,
                objective_offset=objective_offset)
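
        # The integral reward is parameterized per instance: it accumulates the
        # bound trajectory over solving time, relative to the initial bounds
        # and offset set above. Negating it in primal_env turns a quantity to
        # be minimized into a reward to be maximized.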

        print()
        print(f"Instance {instance.name}")
        print(f"  instance number: {p}")
        print(f"  initial primal bound: {initial_primal_bound}")
        print(f"  initial dual bound: {initial_dual_bound}")
        print(f"  objective offset: {objective_offset}")

        info, cumulated_reward = evaluate(primal_env, instance, initial_primal_bound, primal_policy)

        # save instance results
        with open(rl_primal, mode='a') as csv_file:
            primal_writer = csv.DictWriter(csv_file, fieldnames=results_fieldnames)
            primal_writer.writerow({
                'instance': str(instance),
                'instance_number': p,
                'initial_primal_bound': initial_primal_bound,
                'initial_dual_bound': initial_dual_bound,
                'objective_offset': objective_offset,
                'cumulated_reward': cumulated_reward,
                'time': info['solvingtime'],
                'number_of_nodes': info['nnodes'], 
                'primal_bound': info['primal_bound'], 
                'dual_bound': info['dual_bound'], 
                'status': info['status']
            })
        
        info, cumulated_reward = evaluate(nodes_env, instance, initial_primal_bound, nodes_policy)

        # save instance results
        with open(rl_nodes, mode='a') as csv_file:
            node_writer = csv.DictWriter(csv_file, fieldnames=results_fieldnames)
            node_writer.writerow({
                'instance': str(instance),
                'instance_number': p,
                'initial_primal_bound': initial_primal_bound,
                'initial_dual_bound': initial_dual_bound,
                'objective_offset': objective_offset,
                'cumulated_reward': cumulated_reward,
                'time': info['solvingtime'],
                'number_of_nodes': info['nnodes'], 
                'primal_bound': info['primal_bound'], 
                'dual_bound': info['dual_bound'], 
                'status': info['status']
            })

        # Solve using SCIP
        # relpscost_env.reset(instance)
        # _, _, cumulated_reward, _, default_info = relpscost_env.step({})
        # with open(expert_file, mode='a') as csv_file:
        #     expert_writer = csv.DictWriter(csv_file, fieldnames=results_fieldnames)
        #     expert_writer.writerow({
        #         'instance': str(instance),
        #         'instance_number': p,
        #         'initial_primal_bound': initial_primal_bound,
        #         'initial_dual_bound': initial_dual_bound,
        #         'objective_offset': objective_offset,
        #         'cumulated_reward': cumulated_reward,
        #         'time': default_info['solvingtime'],
        #         'number_of_nodes': default_info['nnodes'], 
        #         'primal_bound': default_info['primal_bound'], 
        #         'dual_bound': default_info['dual_bound'], 
        #         'status': default_info['status']
        #     })

        # vanillafullstrong_env.reset(instance)
        # _, _, cumulated_reward, _, default_info = vanillafullstrong_env.step({})
        # with open(strong_branch_file, mode='a') as csv_file:
        #     strong_branch_writer = csv.DictWriter(csv_file, fieldnames=results_fieldnames)
        #     strong_branch_writer.writerow({
        #         'instance': str(instance),
        #         'instance_number': p,
        #         'initial_primal_bound': initial_primal_bound,
        #         'initial_dual_bound': initial_dual_bound,
        #         'objective_offset': objective_offset,
        #         'cumulated_reward': cumulated_reward,
        #         'time': default_info['solvingtime'],
        #         'number_of_nodes': default_info['nnodes'], 
        #         'primal_bound': default_info['primal_bound'], 
        #         'dual_bound': default_info['dual_bound'], 
        #         'status': default_info['status']
        #     })

        # stop after 10 instances (the Ecole generators are infinite)
        if p == 9:
            print("Finished evaluation.")
            break