From a0c7d9aef5745e2fd73ab5a7dc75699f332666d3 Mon Sep 17 00:00:00 2001 From: HIGUCHI Yuta Date: Thu, 21 Feb 2013 12:32:38 +0900 Subject: [PATCH] Add event filter operation. (#2) --- src/topology/discovery_management.c | 48 +++-- src/topology/discovery_management.h | 4 +- src/topology/topology_main.c | 4 + src/topology/topology_management.c | 34 +++- .../topology/discovery_management_test.c | 152 +++++++++++++- unittests/topology/service_management_test.c | 133 ++++++++---- unittests/topology/topology_management_test.c | 189 +++++++++++++++++- 7 files changed, 493 insertions(+), 71 deletions(-) diff --git a/src/topology/discovery_management.c b/src/topology/discovery_management.c index b374745cf..4a12b4c31 100644 --- a/src/topology/discovery_management.c +++ b/src/topology/discovery_management.c @@ -62,20 +62,27 @@ start_discovery_management( void ){ static void -send_flow_mod_receiving_lldp( const sw_entry *sw, uint16_t hard_timeout, uint16_t priority, bool add ) { - struct ofp_match match; - memset( &match, 0, sizeof( struct ofp_match ) ); +set_match_for_lldp( struct ofp_match *match ) { + assert( match != NULL ); + memset( match, 0, sizeof( struct ofp_match ) ); if ( !options.lldp.lldp_over_ip ) { - match.wildcards = OFPFW_ALL & ~OFPFW_DL_TYPE; - match.dl_type = ETH_ETHTYPE_LLDP; + match->wildcards = OFPFW_ALL & ~OFPFW_DL_TYPE; + match->dl_type = ETH_ETHTYPE_LLDP; } else { - match.wildcards = OFPFW_ALL & ~( OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK ); - match.dl_type = ETH_ETHTYPE_IPV4; - match.nw_proto = IPPROTO_ETHERIP; - match.nw_src = options.lldp.lldp_ip_src; - match.nw_dst = options.lldp.lldp_ip_dst; + match->wildcards = OFPFW_ALL & ~( OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK ); + match->dl_type = ETH_ETHTYPE_IPV4; + match->nw_proto = IPPROTO_ETHERIP; + match->nw_src = options.lldp.lldp_ip_src; + match->nw_dst = options.lldp.lldp_ip_dst; } +} + + +static void +send_flow_mod_receiving_lldp( const sw_entry *sw, uint16_t hard_timeout, uint16_t priority, bool add ) { + struct ofp_match match; + set_match_for_lldp( &match ); openflow_actions *actions = create_actions(); const uint16_t max_len = UINT16_MAX; @@ -223,9 +230,17 @@ handle_packet_in( uint64_t dst_datapath_id, probe_request( entry, PROBE_TIMER_EVENT_RECV_LLDP, &dst_datapath_id, dst_port_no ); } +static char PACKET_IN[] = "packet_in"; +static void +handle_event_forward_entry_to_all_result( enum efi_result result, void *user_data ) { + if ( result == EFI_OPERATION_FAILED ) { + warn( "Registering/Unregistering topology to switch event '%s' failed.", ( const char * ) user_data ); + } +} + void -enable_discovery( void ) { +_enable_discovery( void ) { info( "Enabling topology discovery." ); if ( g_discovery_enabled ) { warn( "Topology Discovery is already enabled." 
); @@ -237,16 +252,19 @@ enable_discovery( void ) { // start receiving packet-in set_packet_in_handler( handle_packet_in, NULL ); + // get event from all switches (directly) + add_event_forward_entry_to_all_switches( EVENT_FORWARD_TYPE_PACKET_IN, get_trema_name(), handle_event_forward_entry_to_all_result, PACKET_IN ); + set_switch_status_updated_hook( handle_switch_status_updated_callback, NULL ); set_port_status_updated_hook( handle_port_status_updated_callback, NULL ); // update all port status foreach_port_entry( port_entry_walker, NULL ); - } +void (* enable_discovery )( void ) = _enable_discovery; void -disable_discovery( void ) { +_disable_discovery( void ) { if ( options.always_enabled ) return; info( "Disabling topology discovery." ); if ( !g_discovery_enabled ) { @@ -257,6 +275,9 @@ disable_discovery( void ) { // stop receiving packet-in set_packet_in_handler( ignore_packet_in, NULL ); + // stop getting event from all switches (directly) + delete_event_forward_entry_to_all_switches( EVENT_FORWARD_TYPE_PACKET_IN, get_trema_name(), handle_event_forward_entry_to_all_result, PACKET_IN ); + // ignore switch/port events set_switch_status_updated_hook( NULL, NULL ); set_port_status_updated_hook( NULL, NULL ); @@ -264,4 +285,5 @@ disable_discovery( void ) { // remove LLDP flow entry foreach_sw_entry( switch_del_LLDP_flow_mods, NULL ); } +void (* disable_discovery )( void ) = _disable_discovery; diff --git a/src/topology/discovery_management.h b/src/topology/discovery_management.h index 9c559fc46..454ea64dd 100644 --- a/src/topology/discovery_management.h +++ b/src/topology/discovery_management.h @@ -35,8 +35,8 @@ bool start_discovery_management( void ); /** * Enable discovery. */ -void enable_discovery( void ); -void disable_discovery( void ); +extern void (* enable_discovery )( void ); +extern void (* disable_discovery )( void ); // TODO Future work: port masking API etc. 
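The conversion of enable_discovery and disable_discovery into function pointers (initialized to _enable_discovery and _disable_discovery above) is there so the unit tests further down can substitute no-op mocks, as service_management_test.c does with mock_enable_discovery and mock_disable_discovery. A minimal sketch of that indirection pattern follows; the names my_feature, _my_feature and mock_my_feature are hypothetical, not taken from the patch:

#include <stdio.h>

/* Real implementation; production callers go through the pointer below. */
static void
_my_feature( void ) {
  printf( "real implementation\n" );
}
void ( *my_feature )( void ) = _my_feature;

/* No-op stand-in that a unit test can install in place of the real one. */
static void
mock_my_feature( void ) {
  printf( "mock called\n" );
}

int
main( void ) {
  my_feature();                  /* dispatches to _my_feature */
  my_feature = mock_my_feature;  /* test swaps in the mock */
  my_feature();                  /* dispatches to mock_my_feature */
  my_feature = _my_feature;      /* restore before the next test */
  return 0;
}
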
diff --git a/src/topology/topology_main.c b/src/topology/topology_main.c index 017fdc335..c76c801fb 100644 --- a/src/topology/topology_main.c +++ b/src/topology/topology_main.c @@ -45,6 +45,8 @@ main( int argc, char *argv[] ) { init_openflow_application_interface( get_trema_name() ); + init_event_forward_interface(); + info( "Initializing topology services"); init_topology_table(); @@ -66,6 +68,8 @@ main( int argc, char *argv[] ) { finalize_topology_table(); + finalize_event_forward_interface(); + return 0; } diff --git a/src/topology/topology_management.c b/src/topology/topology_management.c index fb6edd869..0bb8b5c71 100644 --- a/src/topology/topology_management.c +++ b/src/topology/topology_management.c @@ -113,7 +113,12 @@ static void handle_switch_ready( uint64_t datapath_id, void *user_data ) { UNUSED( user_data ); - sw_entry *sw = update_sw_entry( &datapath_id ); + sw_entry *sw = lookup_sw_entry( &datapath_id ); + if ( sw != NULL ) { + warn( "Received switch-ready event, but switch(%#" PRIx64 ") already exists.", datapath_id ); + } else { + sw = update_sw_entry( &datapath_id ); + } sw->up = true; info( "Switch(%#" PRIx64 ") is connected.", datapath_id ); @@ -261,6 +266,30 @@ handle_port_status( uint64_t datapath_id, uint32_t transaction_id, uint8_t reaso } +static char PORT_STATUS[] = "port_status"; +static char STATE_NOTIFY[] = "state_notify"; +static void +handle_event_forward_entry_to_all_result( enum efi_result result, void *user_data ) { + if ( result == EFI_OPERATION_FAILED ) { + error( "Registering topology to switch event '%s' failed.", ( const char * ) user_data ); + } +} + + +static void +emulate_initial_switch_ready( uint64_t* dpids, size_t n_dpids, void *user_data ) { + UNUSED( user_data ); + if( dpids == NULL ) { + error( "Failed to get initial switch lists" ); + return; + } + + for ( size_t i = 0 ; i < n_dpids ; ++i ) { + handle_switch_ready( dpids[i], NULL ); + } +} + + bool init_topology_management( void ) { bool result = true; @@ -281,6 +310,9 @@ finalize_topology_management( void ) { bool start_topology_management( void ) { + add_event_forward_entry_to_all_switches( EVENT_FORWARD_TYPE_PORT_STATUS, get_trema_name(), handle_event_forward_entry_to_all_result, PORT_STATUS ); + add_event_forward_entry_to_all_switches( EVENT_FORWARD_TYPE_STATE_NOTIFY, get_trema_name(), handle_event_forward_entry_to_all_result, STATE_NOTIFY ); + send_efi_switch_list_request( emulate_initial_switch_ready, NULL ); return true; } diff --git a/unittests/topology/discovery_management_test.c b/unittests/topology/discovery_management_test.c index 70e8a4bf4..b93aa08fe 100644 --- a/unittests/topology/discovery_management_test.c +++ b/unittests/topology/discovery_management_test.c @@ -32,7 +32,6 @@ ********************************************************************************/ #define TEST_TREMA_NAME "disc_mgmt_test" -#define TEST_OFA_NAME TEST_TREMA_NAME ".ofa" // defined in trema.c extern void set_trema_name( const char *name ); extern void _free_trema_name(); @@ -49,6 +48,8 @@ static bool ( *original_set_switch_status_updated_hook )( switch_status_updated_ static void ( *original_execute_timer_events )( int *next_timeout_usec ); +static bool ( *original_send_request_message )( const char *to_service_name, const char *from_service_name, const uint16_t tag, const void *data, size_t len, void *user_data ); + #define swap_original( funcname ) \ original_##funcname = funcname;\ funcname = mock_##funcname; @@ -147,6 +148,114 @@ mock_execute_timer_events( int *next_timeout_usec ) { } +static bool 
free_user_data_member = false; +static bool free_event_forward_operation_to_all_request_param = false; +struct callback_info { + void *callback; + void *user_data; +}; +struct event_forward_operation_to_all_request_param { + bool add; + enum efi_event_type type; + char* service_name; + event_forward_entry_to_all_callback callback; + void* user_data; +}; + +static bool +mock_send_request_message( const char *to_service_name, const char *from_service_name, + const uint16_t tag, const void *data, size_t len, void *user_data ) { + uint32_t tag32 = tag; + struct callback_info *hd = user_data; + + check_expected( to_service_name ); + check_expected( from_service_name ); + check_expected( tag32 ); + check_expected( data ); + check_expected( len ); + check_expected( hd->callback ); + check_expected( hd->user_data ); + + bool sent_ok = ( bool ) mock(); + if( sent_ok ) { + if ( free_event_forward_operation_to_all_request_param ) { + struct event_forward_operation_to_all_request_param *p = hd->user_data; + + xfree( p->service_name ); + xfree( p ); + hd->user_data = NULL; + } + if ( free_user_data_member ) { + xfree( hd->user_data ); + } + xfree( hd ); + } + return sent_ok; +} + + +static void +expect_enable_discovery() { + struct expected_data { + management_application_request mgmt; + event_forward_operation_request efi; + char topology[14+1]; + } __attribute__( ( packed ) ) expected_data = { + .mgmt = { + .header = { + .type = htons( MANAGEMENT_APPLICATION_REQUEST ), + .length = htonl(sizeof( struct expected_data ) ), + }, + .application_id = htonl(EVENT_FORWARD_ENTRY_ADD), + }, + .efi = { + .type = EVENT_FORWARD_TYPE_PACKET_IN, + .n_services = htonl( 1 ), + }, + .topology = TEST_TREMA_NAME, + }; + + expect_string( mock_send_request_message, to_service_name, "switch_manager.m" ); + expect_any( mock_send_request_message, from_service_name ); + expect_value( mock_send_request_message, tag32, MESSENGER_MANAGEMENT_REQUEST ); + expect_memory( mock_send_request_message, data, &expected_data, sizeof( struct expected_data ) ); + expect_value( mock_send_request_message, len, sizeof( struct expected_data ) ); + expect_any( mock_send_request_message, hd->callback ); + expect_any( mock_send_request_message, hd->user_data ); + will_return( mock_send_request_message, true ); +} + + +static void +expect_disable_discovery() { + struct expected_data { + management_application_request mgmt; + event_forward_operation_request efi; + char topology[14+1]; + } __attribute__( ( packed ) ) expected_data = { + .mgmt = { + .header = { + .type = htons( MANAGEMENT_APPLICATION_REQUEST ), + .length = htonl(sizeof( struct expected_data ) ), + }, + .application_id = htonl(EVENT_FORWARD_ENTRY_DELETE), + }, + .efi = { + .type = EVENT_FORWARD_TYPE_PACKET_IN, + .n_services = htonl( 1 ), + }, + .topology = TEST_TREMA_NAME, + }; + + expect_string( mock_send_request_message, to_service_name, "switch_manager.m" ); + expect_any( mock_send_request_message, from_service_name ); + expect_value( mock_send_request_message, tag32, MESSENGER_MANAGEMENT_REQUEST ); + expect_memory( mock_send_request_message, data, &expected_data, sizeof( struct expected_data ) ); + expect_value( mock_send_request_message, len, sizeof( struct expected_data ) ); + expect_any( mock_send_request_message, hd->callback ); + expect_any( mock_send_request_message, hd->user_data ); + will_return( mock_send_request_message, true ); +} /******************************************************************************** * Setup and teardown functions. 
********************************************************************************/ @@ -154,16 +263,20 @@ mock_execute_timer_events( int *next_timeout_usec ) { static void setup() { + free_user_data_member = false; + free_event_forward_operation_to_all_request_param = false; set_trema_name( TEST_TREMA_NAME ); init_messenger( "/tmp" ); init_timer(); init_stat(); - init_openflow_application_interface( TEST_OFA_NAME ); + init_openflow_application_interface( TEST_TREMA_NAME ); + init_event_forward_interface(); swap_original( notice ); swap_original( warn ); swap_original( set_switch_status_updated_hook ); swap_original( set_port_status_updated_hook ); + swap_original( send_request_message ); } @@ -175,7 +288,9 @@ teardown() { check_warn = false; revert_original( set_switch_status_updated_hook ); revert_original( set_port_status_updated_hook ); + revert_original( send_request_message ); + finalize_event_forward_interface(); finalize_openflow_application_interface(); finalize_stat(); finalize_timer(); @@ -240,8 +355,12 @@ test_init_finalize_with_always_discovery() { expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); assert_true( start_discovery_management() ); +// disable_discovery(); + finalize_discovery_management(); // deleted timer event struct will not be freed until next timer event @@ -258,18 +377,22 @@ test_enable_discovery_twice_prints_message() { expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); expect_string( mock_warn_check, message, "Topology Discovery is already enabled." ); expect_switch_and_port_status_hook_set(); + expect_enable_discovery(); enable_discovery(); check_warn = false; expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); } @@ -282,16 +405,20 @@ test_disable_discovery_twice_prints_message() { expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); expect_string( mock_warn_check, message, "Topology Discovery was not enabled." 
); expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); check_warn = false; @@ -339,6 +466,8 @@ static void test_switch_status_event_then_flow_mod_lldp_if_sw_up() { setup_discovery_mgmt(); expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); assert_true( switch_status_updated_hook_callback != NULL ); @@ -370,6 +499,7 @@ test_switch_status_event_then_flow_mod_lldp_if_sw_up() { assert_true( delete_message_received_callback( SRC_SW_MSNGER_NAME, helper_sw_received_flow_mod_add_lldp_message_end ) ); expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); teardown_discovery_mgmt(); @@ -432,6 +562,8 @@ test_switch_status_event_over_ip_then_flow_mod_lldp_if_sw_up() { assert_true( start_discovery_management() ); expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); assert_true( switch_status_updated_hook_callback != NULL ); @@ -467,6 +599,7 @@ test_switch_status_event_over_ip_then_flow_mod_lldp_if_sw_up() { assert_true( delete_message_received_callback( SRC_SW_MSNGER_NAME, helper_sw_received_flow_mod_add_lldp_over_ip_message_end ) ); expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); teardown_discovery_mgmt(); @@ -479,6 +612,8 @@ test_port_status_event() { swap_original( execute_timer_events ); expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); assert_true( port_status_updated_hook_callback != NULL ); @@ -506,6 +641,7 @@ test_port_status_event() { delete_sw_entry( sw ); expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); revert_original( execute_timer_events ); @@ -567,6 +703,8 @@ test_enable_discovery_when_sw_exist_then_flow_mod_add_lldp() { datapath_id, 0x1234 ); expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); start_messenger(); @@ -577,6 +715,7 @@ test_enable_discovery_when_sw_exist_then_flow_mod_add_lldp() { delete_sw_entry( sw ); expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); teardown_discovery_mgmt(); @@ -597,6 +736,8 @@ test_disable_discovery_when_sw_exist_then_flow_mod_del_lldp() { datapath_id, 0x1234 ); expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); const uint64_t datapath_id = 0x1234; @@ -604,6 +745,7 @@ test_disable_discovery_when_sw_exist_then_flow_mod_del_lldp() { sw->up = true; expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); start_messenger(); @@ -692,6 +834,8 @@ test_enable_discovery_when_sw_exist_then_flow_mod_add_lldp_over_ip() { nw_dst, 0x89ABCDEF ); expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); start_messenger(); @@ -702,6 +846,7 @@ test_enable_discovery_when_sw_exist_then_flow_mod_add_lldp_over_ip() { delete_sw_entry( sw ); expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); teardown_discovery_mgmt(); @@ -734,6 +879,8 @@ 
test_disable_discovery_when_sw_exist_then_flow_mod_del_lldp_over_ip() { nw_dst, 0x89ABCDEF ); expect_switch_and_port_status_hook_set(); + free_event_forward_operation_to_all_request_param = true; + expect_enable_discovery(); enable_discovery(); const uint64_t datapath_id = 0x1234; @@ -741,6 +888,7 @@ test_disable_discovery_when_sw_exist_then_flow_mod_del_lldp_over_ip() { sw->up = true; expect_switch_and_port_status_hook_clear(); + expect_disable_discovery(); disable_discovery(); start_messenger(); diff --git a/unittests/topology/service_management_test.c b/unittests/topology/service_management_test.c index 5ddb7d30a..ee93cba13 100644 --- a/unittests/topology/service_management_test.c +++ b/unittests/topology/service_management_test.c @@ -46,6 +46,12 @@ extern void _free_trema_name(); // defined in service_management.c extern void ping_all_subscriber(void* user_data ); +//defined in discovery_manager.h +extern void _enable_discovery( void ); +extern void _disable_discovery( void ); +extern void (* enable_discovery )( void ); +extern void (* disable_discovery )( void ); + /******************************************************************************** * Mock functions. ********************************************************************************/ @@ -64,6 +70,7 @@ static bool ( *original_add_periodic_event_callback )( const time_t seconds, tim static uint8_t ( *original_set_discovered_link_status )( topology_update_link_status* link_status ); + static bool mock_add_message_requested_callback( const char *service_name, void ( *callback )( const messenger_context_handle *handle, uint16_t tag, void *data, size_t len ) ) { @@ -438,6 +445,17 @@ mock_execute_timer_events( int *next_timeout_usec ) { } +static void +mock_enable_discovery() { + +} + + +static void +mock_disable_discovery() { + +} + /******************************************************************************** * Setup and teardown functions. ********************************************************************************/ @@ -467,6 +485,8 @@ teardown_fake_messenger() { static void setup_service_management() { + enable_discovery = _enable_discovery; + disable_discovery = _disable_discovery; set_trema_name( TEST_TREMA_NAME ); service_management_options options = { @@ -999,7 +1019,7 @@ test_ping_ageout_subscriber() { static void -test_subscribe_from_client() { +test_recv_subscribe_from_client() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); @@ -1030,6 +1050,7 @@ test_subscribe_from_client() { assert_true( e != NULL ); assert_string_equal( e->name, TEST_SUBSCRIBER_NAME ); + // clean up delete_subscriber_entry( e ); assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); @@ -1042,7 +1063,7 @@ test_subscribe_from_client() { static void -test_subscribe_from_subscribed_client_return_already_subscribed() { +test_recv_subscribe_from_subscribed_client_return_already_subscribed() { // note: setup/teardown differ from test_subscribe_from_client() // avoid periodic ping event from running. 
@@ -1075,6 +1096,7 @@ test_subscribe_from_subscribed_client_return_already_subscribed() { assert_true( e != NULL ); assert_string_equal( e->name, TEST_SUBSCRIBER_NAME ); + // clean up assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); finalize_timer(); @@ -1085,7 +1107,7 @@ test_subscribe_from_subscribed_client_return_already_subscribed() { static void -test_unsubscribe_from_client() { +test_recv_unsubscribe_from_client() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); @@ -1094,11 +1116,10 @@ test_unsubscribe_from_client() { init_timer(); assert_true( add_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); -// assert_true( add_message_received_callback( TEST_CONTROL_NAME, callback_test_control_notification_handler ) ); start_service_management(); - // check subscriber table + // prepare subscriber table assert_true( insert_subscriber_entry( TEST_SUBSCRIBER_NAME ) ); subscriber_entry* e = lookup_subscriber_entry( TEST_SUBSCRIBER_NAME ); assert_true( e != NULL ); @@ -1123,8 +1144,8 @@ test_unsubscribe_from_client() { subscriber_entry* ea = lookup_subscriber_entry( TEST_SUBSCRIBER_NAME ); assert_true( ea == NULL ); + // clean up assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); -// assert_true( delete_message_received_callback( TEST_CONTROL_NAME, callback_test_control_notification_handler ) ); finalize_timer(); finalize_messenger(); @@ -1134,7 +1155,7 @@ test_unsubscribe_from_client() { static void -test_unsubscribe_from_unsubscribed_client_return_no_such_subscriber() { +test_recv_unsubscribe_from_unsubscribed_client_return_no_such_subscriber() { // note: setup/teardown differ from test_subscribe_from_client() // avoid periodic ping event from running. @@ -1169,6 +1190,7 @@ test_unsubscribe_from_unsubscribed_client_return_no_such_subscriber() { subscriber_entry* ea = lookup_subscriber_entry( TEST_SUBSCRIBER_NAME ); assert_true( ea == NULL ); + // clean up assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); finalize_timer(); @@ -1179,11 +1201,13 @@ test_unsubscribe_from_unsubscribed_client_return_no_such_subscriber() { static void -test_enable_discovery_from_client() { +test_recv_enable_discovery_from_client() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); + enable_discovery = mock_enable_discovery; + init_messenger( "/tmp" ); init_timer(); @@ -1215,6 +1239,7 @@ test_enable_discovery_from_client() { assert_string_equal( ea->name, TEST_SUBSCRIBER_NAME ); assert_true( ea->use_discovery ); + // clean up assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); finalize_timer(); @@ -1225,11 +1250,13 @@ test_enable_discovery_from_client() { static void -test_enable_discovery_from_client_when_already_enabled() { +test_recv_enable_discovery_from_client_when_already_enabled() { // avoid periodic ping event from running. 
void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); + enable_discovery = mock_enable_discovery; + init_messenger( "/tmp" ); init_timer(); @@ -1261,6 +1288,7 @@ test_enable_discovery_from_client_when_already_enabled() { assert_string_equal( ea->name, TEST_SUBSCRIBER_NAME ); assert_true( ea->use_discovery ); + // clean up assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); finalize_timer(); @@ -1271,11 +1299,13 @@ test_enable_discovery_from_client_when_already_enabled() { static void -test_enable_discovery_from_unsubscribed_client() { +test_recv_enable_discovery_from_unsubscribed_client() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); + enable_discovery = mock_enable_discovery; + init_messenger( "/tmp" ); init_timer(); @@ -1305,6 +1335,7 @@ test_enable_discovery_from_unsubscribed_client() { assert_string_equal( ea->name, TEST_SUBSCRIBER_NAME ); assert_true( ea->use_discovery ); + // clean up delete_subscriber_entry( ea ); assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); @@ -1316,11 +1347,13 @@ test_enable_discovery_from_unsubscribed_client() { static void -test_disable_discovery_from_client() { +test_recv_disable_discovery_from_client() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); + disable_discovery = mock_disable_discovery; + init_messenger( "/tmp" ); init_timer(); @@ -1352,6 +1385,7 @@ test_disable_discovery_from_client() { assert_string_equal( ea->name, TEST_SUBSCRIBER_NAME ); assert_false( ea->use_discovery ); + // clean up assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); finalize_timer(); @@ -1362,11 +1396,13 @@ test_disable_discovery_from_client() { static void -test_disable_discovery_from_unsubscribed_client() { +test_recv_disable_discovery_from_unsubscribed_client() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); + disable_discovery = mock_disable_discovery; + init_messenger( "/tmp" ); init_timer(); @@ -1394,6 +1430,7 @@ test_disable_discovery_from_unsubscribed_client() { subscriber_entry* ea = lookup_subscriber_entry( TEST_SUBSCRIBER_NAME ); assert_true( ea == NULL ); + // clean up assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); finalize_timer(); @@ -1404,7 +1441,7 @@ test_disable_discovery_from_unsubscribed_client() { static void -test_query_switch_status_from_client() { +test_recv_query_switch_status_from_client() { // avoid periodic ping event from running. 
void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); @@ -1429,7 +1466,7 @@ test_query_switch_status_from_client() { buf->data, buf->length, NULL ); free_buffer( buf ); - + // check reply expect_value( mock_query_switch_status_reply, number_of_switches, 1 ); expect_value( mock_query_switch_status_reply, status, TD_SWITCH_DOWN ); expect_value( mock_query_switch_status_reply, dpid, 0x1234 ); @@ -1437,6 +1474,7 @@ test_query_switch_status_from_client() { start_event_handler(); start_messenger(); + // clean up delete_sw_entry( sw ); assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); @@ -1449,7 +1487,7 @@ test_query_switch_status_from_client() { static void -test_query_port_status_from_client() { +test_recv_query_port_status_from_client() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); @@ -1480,7 +1518,7 @@ test_query_port_status_from_client() { buf->data, buf->length, NULL ); free_buffer( buf ); - + // check reply expect_value( mock_query_port_status_reply, number_of_ports, 1 ); expect_value( mock_query_port_status_reply, status, TD_PORT_DOWN ); expect_value( mock_query_port_status_reply, dpid, 0x1234 ); @@ -1492,6 +1530,7 @@ test_query_port_status_from_client() { start_event_handler(); start_messenger(); + // clean up delete_port_entry( sw, port ); delete_sw_entry( sw ); @@ -1505,7 +1544,7 @@ test_query_port_status_from_client() { static void -test_query_link_status_from_client() { +test_recv_query_link_status_from_client() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); @@ -1561,7 +1600,7 @@ test_query_link_status_from_client() { buf->data, buf->length, NULL ); free_buffer( buf ); - + // check reply expect_value( mock_query_link_status_reply, number_of_links, 3 ); expect_value( mock_query_link_status_reply, status, TD_LINK_DOWN ); @@ -1586,6 +1625,7 @@ test_query_link_status_from_client() { start_event_handler(); start_messenger(); + // clean up delete_link_to( port ); delete_link_to( port2 ); delete_port_entry( sw, port ); @@ -1646,7 +1686,7 @@ test_set_discovered_link_status() { link_status.to_portno = 72; link_status.status = TD_LINK_UP; - + // check called handlers expect_value( mock_link_status_notification, from_dpid, 0x1234 ); expect_value( mock_link_status_notification, from_portno, 42 ); expect_value( mock_link_status_notification, to_dpid, 0x5678 ); @@ -1654,7 +1694,6 @@ test_set_discovered_link_status() { expect_value( mock_link_status_notification, status, TD_LINK_UP ); will_return( mock_link_status_notification, END_ON_RETURN ); - expect_value( local_link_status_updated_handler, user_data, NULL ); expect_not_value( local_link_status_updated_handler, sw, NULL ); expect_value( local_link_status_updated_handler, port_no, 42 ); @@ -1666,12 +1705,14 @@ test_set_discovered_link_status() { expect_value( local_link_status_updated_handler, link_port_no, 72 ); expect_value( local_link_status_updated_handler, link_up, true ); + // set links uint8_t result = set_discovered_link_status( &link_status ); assert_int_equal( result, TD_RESPONSE_OK ); start_event_handler(); start_messenger(); + // clean up delete_link_to( port ); delete_link_to( port2 ); delete_port_entry( sw, port ); @@ -1731,7 +1772,7 @@ test_set_discovered_link_status_port_external() { link_status.to_portno = 
72; link_status.status = TD_LINK_DOWN; - + // check called handlers expect_value( mock_link_status_notification, from_dpid, 0x1234 ); expect_value( mock_link_status_notification, from_portno, 42 ); expect_value( mock_link_status_notification, to_dpid, 0x5678 ); @@ -1747,7 +1788,6 @@ test_set_discovered_link_status_port_external() { expect_value( mock_port_status_notification, status, TD_PORT_UP ); will_return( mock_port_status_notification, END_ON_RETURN ); - expect_value( local_link_status_updated_handler, user_data, NULL ); expect_not_value( local_link_status_updated_handler, sw, NULL ); expect_value( local_link_status_updated_handler, port_no, 42 ); @@ -1767,13 +1807,14 @@ test_set_discovered_link_status_port_external() { expect_value( local_port_status_updated_handler, up, true ); expect_value( local_port_status_updated_handler, external, true ); - + // set links uint8_t result = set_discovered_link_status( &link_status ); assert_int_equal( result, TD_RESPONSE_OK ); start_event_handler(); start_messenger(); + // clean up delete_link_to( port ); delete_link_to( port2 ); delete_port_entry( sw, port ); @@ -1833,7 +1874,7 @@ test_set_discovered_link_status_linkchange() { link_status.to_portno = 72; link_status.status = TD_LINK_UP; - + // check called handlers expect_value( mock_link_status_notification, from_dpid, 0x1234 ); expect_value( mock_link_status_notification, from_portno, 42 ); expect_value( mock_link_status_notification, to_dpid, 0x5678 ); @@ -1841,7 +1882,6 @@ test_set_discovered_link_status_linkchange() { expect_value( mock_link_status_notification, status, TD_LINK_UP ); will_return( mock_link_status_notification, END_ON_RETURN ); - expect_value( local_link_status_updated_handler, user_data, NULL ); expect_not_value( local_link_status_updated_handler, sw, NULL ); expect_value( local_link_status_updated_handler, port_no, 42 ); @@ -1853,12 +1893,14 @@ test_set_discovered_link_status_linkchange() { expect_value( local_link_status_updated_handler, link_port_no, 72 ); expect_value( local_link_status_updated_handler, link_up, true ); + // set links uint8_t result = set_discovered_link_status( &link_status ); assert_int_equal( result, TD_RESPONSE_OK ); start_event_handler(); start_messenger(); + // clean up delete_link_to( port ); delete_link_to( port2 ); delete_port_entry( sw, port ); @@ -1912,10 +1954,12 @@ test_set_discovered_link_status_on_down_port_fail() { link_status.to_portno = 72; link_status.status = TD_LINK_UP; - + // set links uint8_t result = set_discovered_link_status( &link_status ); + // check call to fail assert_int_equal( result, TD_RESPONSE_INVALID ); + // clean up delete_link_to( port ); delete_link_to( port2 ); delete_port_entry( sw, port ); @@ -1947,10 +1991,12 @@ test_set_discovered_link_status_on_invalid_port_fail() { link_status.to_portno = 72; link_status.status = TD_LINK_UP; - + // set links uint8_t result = set_discovered_link_status( &link_status ); + // check call to fail assert_int_equal( result, TD_RESPONSE_INVALID ); + // clean up delete_sw_entry( sw ); assert_true( set_port_status_updated_hook( NULL, NULL ) ); @@ -1972,8 +2018,9 @@ test_set_discovered_link_status_on_invalid_switch_fail() { link_status.to_portno = 72; link_status.status = TD_LINK_UP; - + // set link uint8_t result = set_discovered_link_status( &link_status ); + // check call to fail assert_int_equal( result, TD_RESPONSE_INVALID ); assert_true( set_port_status_updated_hook( NULL, NULL ) ); @@ -1982,7 +2029,7 @@ test_set_discovered_link_status_on_invalid_switch_fail() { static void 
-test_update_link_status_request() { +test_recv_update_link_status_request() { // avoid periodic ping event from running. void ( *original_execute_timer_events )( int *next_timeout_usec ); swap_original( execute_timer_events ); @@ -2005,6 +2052,7 @@ test_update_link_status_request() { req->to_portno = htons( 72 ); req->status = TD_LINK_UP; + // check internal API call expect_value( mock_set_discovered_link_status, from_dpid, 0x1234 ); expect_value( mock_set_discovered_link_status, from_portno, 42 ); expect_value( mock_set_discovered_link_status, to_dpid, 0x5678 ); @@ -2021,6 +2069,7 @@ test_update_link_status_request() { start_event_handler(); start_messenger(); + // clean up assert_true( delete_message_replied_callback( TEST_SUBSCRIBER_NAME, callback_fake_libtopology_client_reply_end ) ); finalize_timer(); @@ -2068,22 +2117,22 @@ main() { unit_test_setup_teardown( test_ping_subscriber, setup_fake_subscriber, teardown_fake_subscriber ), unit_test_setup_teardown( test_ping_ageout_subscriber, setup_fake_messenger, teardown_fake_messenger ), - unit_test_setup_teardown( test_subscribe_from_client, setup_service_management, teardown_service_management ), - unit_test_setup_teardown( test_subscribe_from_subscribed_client_return_already_subscribed, setup_fake_subscriber, teardown_fake_subscriber ), + unit_test_setup_teardown( test_recv_subscribe_from_client, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_subscribe_from_subscribed_client_return_already_subscribed, setup_fake_subscriber, teardown_fake_subscriber ), - unit_test_setup_teardown( test_unsubscribe_from_client, setup_service_management, teardown_service_management ), - unit_test_setup_teardown( test_unsubscribe_from_unsubscribed_client_return_no_such_subscriber, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_unsubscribe_from_client, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_unsubscribe_from_unsubscribed_client_return_no_such_subscriber, setup_service_management, teardown_service_management ), - unit_test_setup_teardown( test_enable_discovery_from_client, setup_fake_subscriber, teardown_fake_subscriber ), - unit_test_setup_teardown( test_enable_discovery_from_client_when_already_enabled, setup_fake_subscriber, teardown_fake_subscriber ), - unit_test_setup_teardown( test_enable_discovery_from_unsubscribed_client, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_enable_discovery_from_client, setup_fake_subscriber, teardown_fake_subscriber ), + unit_test_setup_teardown( test_recv_enable_discovery_from_client_when_already_enabled, setup_fake_subscriber, teardown_fake_subscriber ), + unit_test_setup_teardown( test_recv_enable_discovery_from_unsubscribed_client, setup_service_management, teardown_service_management ), - unit_test_setup_teardown( test_disable_discovery_from_client, setup_fake_subscriber, teardown_fake_subscriber ), - unit_test_setup_teardown( test_disable_discovery_from_unsubscribed_client, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_disable_discovery_from_client, setup_fake_subscriber, teardown_fake_subscriber ), + unit_test_setup_teardown( test_recv_disable_discovery_from_unsubscribed_client, setup_service_management, teardown_service_management ), - unit_test_setup_teardown( test_query_switch_status_from_client, setup_service_management, teardown_service_management ), - 
unit_test_setup_teardown( test_query_port_status_from_client, setup_service_management, teardown_service_management ), - unit_test_setup_teardown( test_query_link_status_from_client, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_query_switch_status_from_client, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_query_port_status_from_client, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_query_link_status_from_client, setup_service_management, teardown_service_management ), unit_test_setup_teardown( test_set_discovered_link_status, setup_fake_subscriber, teardown_fake_subscriber ), unit_test_setup_teardown( test_set_discovered_link_status_port_external, setup_fake_subscriber, teardown_fake_subscriber ), @@ -2092,7 +2141,7 @@ main() { unit_test_setup_teardown( test_set_discovered_link_status_on_invalid_port_fail, setup_fake_subscriber, teardown_fake_subscriber ), unit_test_setup_teardown( test_set_discovered_link_status_on_invalid_switch_fail, setup_fake_subscriber, teardown_fake_subscriber ), - unit_test_setup_teardown( test_update_link_status_request, setup_service_management, teardown_service_management ), + unit_test_setup_teardown( test_recv_update_link_status_request, setup_service_management, teardown_service_management ), }; diff --git a/unittests/topology/topology_management_test.c b/unittests/topology/topology_management_test.c index 73f340b81..e5aabeb24 100644 --- a/unittests/topology/topology_management_test.c +++ b/unittests/topology/topology_management_test.c @@ -32,6 +32,12 @@ ********************************************************************************/ +#define TEST_TREMA_NAME "test_topo_mgmt" + +// defined in trema.c +extern void set_trema_name( const char *name ); +extern void _free_trema_name(); + /******************************************************************************** * Mock functions. 
********************************************************************************/ @@ -48,6 +54,7 @@ static void ( *original_notify_switch_status_for_all_user )( sw_entry *sw ); static void ( *original_notify_port_status_for_all_user )( port_entry *port ); static void ( *original_notify_link_status_for_all_user )( port_entry *port ); +static bool ( *original_send_request_message )( const char *to_service_name, const char *from_service_name, const uint16_t tag, const void *data, size_t len, void *user_data ); static int mock_notify_switch_status_for_all_user_calls = 0; static int mock_notify_switch_status_for_all_user_max = 0; @@ -112,24 +119,173 @@ mock_notify_link_status_for_all_user( port_entry *port ) { check_expected( up ); } + +static bool free_user_data_member = false; +struct callback_info { + void *callback; + void *user_data; +}; +struct event_forward_operation_to_all_request_param { + bool add; + enum efi_event_type type; + char* service_name; + event_forward_entry_to_all_callback callback; + void* user_data; +}; + + +static bool +mock_send_request_message( const char *to_service_name, const char *from_service_name, + const uint16_t tag, const void *data, size_t len, void *user_data ) { + uint32_t tag32 = tag; + struct callback_info *hd = user_data; + + check_expected( to_service_name ); + check_expected( from_service_name ); + check_expected( tag32 ); + check_expected( data ); + check_expected( len ); + check_expected( hd->callback ); + check_expected( hd->user_data ); + + bool sent_ok = ( bool ) mock(); + if( sent_ok ) { + if ( free_user_data_member ) { + const management_application_request* mgmt = data; + switch ( htonl(mgmt->application_id) ) { + case EVENT_FORWARD_ENTRY_ADD: + case EVENT_FORWARD_ENTRY_DELETE: + case EVENT_FORWARD_ENTRY_DUMP: + case EVENT_FORWARD_ENTRY_SET: + { + struct event_forward_operation_to_all_request_param *p = hd->user_data; + + xfree( p->service_name ); + xfree( p ); + hd->user_data = NULL; + } + break; + case EFI_GET_SWLIST: + // nothing to free + break; + default: + xfree( hd->user_data ); + break; + } + } + xfree( hd ); + } + return sent_ok; +} + + +static void +expect_port_status_set() { + struct expected_data { + management_application_request mgmt; + event_forward_operation_request efi; + char topology[14+1]; + } __attribute__( ( packed ) ) expected_data = { + .mgmt = { + .header = { + .type = htons( MANAGEMENT_APPLICATION_REQUEST ), + .length = htonl(sizeof( struct expected_data ) ), + }, + .application_id = htonl(EVENT_FORWARD_ENTRY_ADD), + }, + .efi = { + .type = EVENT_FORWARD_TYPE_PORT_STATUS, + .n_services = htonl( 1 ), + }, + .topology = TEST_TREMA_NAME + }; + + expect_string( mock_send_request_message, to_service_name, "switch_manager.m" ); + expect_any( mock_send_request_message, from_service_name ); + expect_value( mock_send_request_message, tag32, MESSENGER_MANAGEMENT_REQUEST ); + expect_memory( mock_send_request_message, data, &expected_data, sizeof( struct expected_data ) ); + expect_value( mock_send_request_message, len, sizeof( struct expected_data ) ); + expect_any( mock_send_request_message, hd->callback ); + expect_any( mock_send_request_message, hd->user_data ); + will_return( mock_send_request_message, true ); +} + + +static void +expect_state_notify_set() { + struct expected_data { + management_application_request mgmt; + event_forward_operation_request efi; + char topology[14+1]; + } __attribute__( ( packed ) ) expected_data = { + .mgmt = { + .header = { + .type = htons( MANAGEMENT_APPLICATION_REQUEST ), + .length = 
htonl(sizeof( struct expected_data ) ), + }, + .application_id = htonl(EVENT_FORWARD_ENTRY_ADD), + }, + .efi = { + .type = EVENT_FORWARD_TYPE_STATE_NOTIFY, + .n_services = htonl( 1 ), + }, + .topology = TEST_TREMA_NAME, + }; + + expect_string( mock_send_request_message, to_service_name, "switch_manager.m" ); + expect_any( mock_send_request_message, from_service_name ); + expect_value( mock_send_request_message, tag32, MESSENGER_MANAGEMENT_REQUEST ); + expect_memory( mock_send_request_message, data, &expected_data, sizeof( struct expected_data ) ); + expect_value( mock_send_request_message, len, sizeof( struct expected_data ) ); + expect_any( mock_send_request_message, hd->callback ); + expect_any( mock_send_request_message, hd->user_data ); + will_return( mock_send_request_message, true ); +} + + +static void +expect_switch_list_request() { + struct expected_data { + management_application_request mgmt; + } __attribute__( ( packed ) ) expected_data = { + .mgmt = { + .header = { + .type = htons( MANAGEMENT_APPLICATION_REQUEST ), + .length = htonl(sizeof( struct expected_data ) ), + }, + .application_id = htonl(EFI_GET_SWLIST), + }, + }; + + expect_string( mock_send_request_message, to_service_name, "switch_manager.m" ); + expect_any( mock_send_request_message, from_service_name ); + expect_value( mock_send_request_message, tag32, MESSENGER_MANAGEMENT_REQUEST ); + expect_memory( mock_send_request_message, data, &expected_data, sizeof( struct expected_data ) ); + expect_value( mock_send_request_message, len, sizeof( struct expected_data ) ); + expect_any( mock_send_request_message, hd->callback ); + expect_any( mock_send_request_message, hd->user_data ); + will_return( mock_send_request_message, true ); +} + /******************************************************************************** * Setup and teardown functions. 
********************************************************************************/ -const char* OFA_SERVICE_NAME = "test_topo_mgmt.ofa"; - - static void setup() { + set_trema_name( TEST_TREMA_NAME ); init_messenger("/tmp"); init_timer(); init_stat(); - init_openflow_application_interface( OFA_SERVICE_NAME ); + init_openflow_application_interface( TEST_TREMA_NAME ); + init_event_forward_interface(); swap_original( notify_switch_status_for_all_user ); swap_original( notify_port_status_for_all_user ); swap_original( notify_link_status_for_all_user ); + swap_original( send_request_message ); + free_user_data_member = false; mock_notify_switch_status_for_all_user_calls = 0; mock_notify_switch_status_for_all_user_max = 0; mock_notify_port_status_for_all_user_calls = 0; @@ -139,11 +295,13 @@ setup() { static void teardown() { + finalize_event_forward_interface(); finalize_openflow_application_interface(); finalize_timer(); finalize_stat(); finalize_messenger(); + revert_original( send_request_message ); revert_original( notify_switch_status_for_all_user ); revert_original( notify_port_status_for_all_user ); revert_original( notify_link_status_for_all_user ); @@ -151,6 +309,7 @@ teardown() { mock_notify_switch_status_for_all_user_max = 0; mock_notify_port_status_for_all_user_calls = 0; mock_notify_port_status_for_all_user_max = 0; + _free_trema_name(); } @@ -158,6 +317,10 @@ static void setup_topology_mgmt() { setup(); assert_true( init_topology_management() ); + free_user_data_member = true; + expect_port_status_set(); + expect_state_notify_set(); + expect_switch_list_request(); assert_true( start_topology_management() ); } @@ -178,6 +341,10 @@ teardown_topology_mgmt() { static void test_init_start_finalize_topology_management() { assert_true( init_topology_management() ); + free_user_data_member = true; + expect_port_status_set(); + expect_state_notify_set(); + expect_switch_list_request(); assert_true( start_topology_management() ); finalize_topology_management(); } @@ -210,7 +377,7 @@ test_receive_switch_ready_then_notify_sw_status_and_request_features() { data.datapath_id = htonll( 0x1234 ); data.service_name_length = 0; const size_t len = sizeof( openflow_service_header_t ); - assert_true( send_message( OFA_SERVICE_NAME, MESSENGER_OPENFLOW_READY, &data, len ) ); + assert_true( send_message( TEST_TREMA_NAME, MESSENGER_OPENFLOW_READY, &data, len ) ); // check notify to service mgmt expect_value( mock_notify_switch_status_for_all_user, datapath_id, 0x1234 ); @@ -295,7 +462,7 @@ test_feature_reply_then_update_ports() { port5->port_no = htons( OFPP_FLOOD ); port5->state = htonl( OFPPS_LINK_DOWN ); - assert_true( send_message( OFA_SERVICE_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); + assert_true( send_message( TEST_TREMA_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); free_buffer( buf ); // check notify to service mgmt @@ -362,7 +529,7 @@ test_receive_switch_disconnected_then_notify_sw_status() { data.datapath_id = htonll( 0x1234 ); data.service_name_length = 0; const size_t len = sizeof( openflow_service_header_t ); - assert_true( send_message( OFA_SERVICE_NAME, MESSENGER_OPENFLOW_DISCONNECTED, &data, len ) ); + assert_true( send_message( TEST_TREMA_NAME, MESSENGER_OPENFLOW_DISCONNECTED, &data, len ) ); // check notify to service mgmt expect_value( mock_notify_link_status_for_all_user, from_dpid, 0x1234 ); @@ -434,7 +601,7 @@ test_receive_port_add_status_then_notify_port_status() { port_status->desc.config = htonl( OFPPC_PORT_DOWN ); sprintf( port_status->desc.name, 
"Added port" ); - assert_true( send_message( OFA_SERVICE_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); + assert_true( send_message( TEST_TREMA_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); free_buffer( buf ); // check notify to service mgmt @@ -506,7 +673,7 @@ test_receive_port_del_status_then_notify_port_status() { port_status->desc.state = htonl( 0 ); sprintf( port_status->desc.name, "Port removed" ); - assert_true( send_message( OFA_SERVICE_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); + assert_true( send_message( TEST_TREMA_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); free_buffer( buf ); // check notify to service mgmt @@ -579,7 +746,7 @@ test_receive_port_mod_status_then_notify_port_status() { port_status->desc.state = htonl( OFPPS_LINK_DOWN ); sprintf( port_status->desc.name, "Port changed" ); - assert_true( send_message( OFA_SERVICE_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); + assert_true( send_message( TEST_TREMA_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); free_buffer( buf ); // check notify to service mgmt @@ -654,7 +821,7 @@ test_receive_port_mod_status_port_no_then_notify_port_status() { port_status->desc.port_no = htons( 3 ); sprintf( port_status->desc.name, "Port changed" ); - assert_true( send_message( OFA_SERVICE_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); + assert_true( send_message( TEST_TREMA_NAME, MESSENGER_OPENFLOW_MESSAGE, buf->data, buf->length ) ); free_buffer( buf ); // check notify to service mgmt