diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index ecd66ef..0e83160 100644 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -38,8 +38,8 @@ Scenario execution involves the following steps: 6. Shaker starts tests one by one in order they are listed in the scenario. Test definition is converted into the actual command that will be executed by agent. Shaker schedules the command to be started at the same - time on all agents. For networking testing only agents in ``master`` role - are involved. Slave agents are used as back-end for corresponding commands + time on all agents. For networking testing only agents in ``primary`` role + are involved. Minion agents are used as back-end for corresponding commands (i.e. they run iperf in server mode). 7. Agents send their results to the server. Once all replies are received diff --git a/doc/source/catalog.rst b/doc/source/catalog.rst index 51d34df..feeb79c 100644 --- a/doc/source/catalog.rst +++ b/doc/source/catalog.rst @@ -23,7 +23,7 @@ OpenStack L2 Cross-AZ ^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches pairs of instances in the same tenant network. Every instance is hosted on a separate compute node, all available compute -nodes are utilized. The master and slave instances are in different +nodes are utilized. The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. The traffic goes within the tenant network (L2 domain). @@ -37,7 +37,7 @@ OpenStack L3 East-West Cross-AZ In this scenario Shaker launches pairs of instances, each instance on its own compute node. All available compute nodes are utilized. Instances are connected to one of 2 tenant networks, which plugged into single router. The traffic goes -from one network to the other (L3 east-west). The master and slave instances +from one network to the other (L3 east-west). The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. @@ -50,10 +50,10 @@ OpenStack L3 North-South Cross-AZ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches pairs of instances on different compute nodes. All available compute nodes are utilized. Instances are in different networks -connected to different routers, master accesses slave by floating ip. The +connected to different routers, primary accesses minion by floating ip. The traffic goes from one network via external network to the other network. The -master and slave instances are in different availability zones. The scenario is -used to test throughput between `nova` and `vcenter` zones. +primary and minion instances are in different availability zones. The scenario +is used to test throughput between `nova` and `vcenter` zones. To use this scenario specify parameter ``--scenario openstack/cross_az/full_l3_north_south``. Scenario source is available at: https://opendev.org/performa/shaker/src/branch/master/shaker/scenarios/openstack/cross_az/full_l3_north_south.yaml @@ -63,8 +63,8 @@ Scenario source is available at: https://opendev.org/performa/shaker/src/branch/ OpenStack L2 Cross-AZ Performance ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches 1 pair of instances in the same tenant -network. Each instance is hosted on a separate compute node. The master and -slave instances are in different availability zones. The scenario is used to +network. Each instance is hosted on a separate compute node. 
The primary and +minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. To use this scenario specify parameter ``--scenario openstack/cross_az/perf_l2``. @@ -77,8 +77,9 @@ OpenStack L3 East-West Cross-AZ Performance In this scenario Shaker launches 1 pair of instances, each instance on its own compute node. Instances are connected to one of 2 tenant networks, which plugged into single router. The traffic goes from one network to the other (L3 -east-west). The master and slave instances are in different availability zones. -The scenario is used to test throughput between `nova` and `vcenter` zones. +east-west). The primary and minion instances are in different availability +zones. The scenario is used to test throughput between `nova` and `vcenter` +zones. To use this scenario specify parameter ``--scenario openstack/cross_az/perf_l3_east_west``. Scenario source is available at: https://opendev.org/performa/shaker/src/branch/master/shaker/scenarios/openstack/cross_az/perf_l3_east_west.yaml @@ -89,8 +90,8 @@ OpenStack L3 North-South Cross-AZ Performance ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches 1 pair of instances on different compute nodes. Instances are in different networks connected to different routers, -master accesses slave by floating ip. The traffic goes from one network via -external network to the other network. The master and slave instances are in +primary accesses minion by floating ip. The traffic goes from one network via +external network to the other network. The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. @@ -103,7 +104,7 @@ OpenStack L2 Cross-AZ UDP ^^^^^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches pairs of instances in the same tenant network. Every instance is hosted on a separate compute node. The load is generated by -UDP traffic. The master and slave instances are in different availability +UDP traffic. The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. @@ -116,9 +117,9 @@ OpenStack L2 Cross-AZ UDP Jumbo ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches pairs of instances in the same tenant network. Every instance is hosted on a separate compute node. The load is generated by -UDP traffic and jumbo packets. The master and slave instances are in different -availability zones. The scenario is used to test throughput between `nova` and -`vcenter` zones. +UDP traffic and jumbo packets. The primary and minion instances are in +different availability zones. The scenario is used to test throughput between +`nova` and `vcenter` zones. To use this scenario specify parameter ``--scenario openstack/cross_az/udp_l2_mss8950``. Scenario source is available at: https://opendev.org/performa/shaker/src/branch/master/shaker/scenarios/openstack/cross_az/udp_l2_mss8950.yaml @@ -130,7 +131,7 @@ OpenStack L3 East-West Cross-AZ UDP In this scenario Shaker launches pairs of instances, each instance on its own compute node. Instances are connected to one of 2 tenant networks, which plugged into single router. The traffic goes from one network to the other (L3 -east-west). The load is generated by UDP traffic. The master and slave +east-west). The load is generated by UDP traffic. The primary and minion instances are in different availability zones. 
The scenario is used to test throughput between `nova` and `vcenter` zones. @@ -276,7 +277,7 @@ OpenStack L3 North-South ^^^^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches pairs of instances on different compute nodes. All available compute nodes are utilized. Instances are in different networks -connected to different routers, master accesses slave by floating ip. The +connected to different routers, primary accesses minion by floating ip. The traffic goes from one network via external network to the other network. To use this scenario specify parameter ``--scenario openstack/full_l3_north_south``. @@ -311,7 +312,7 @@ OpenStack L3 North-South Performance ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches 1 pair of instances on different compute nodes. Instances are in different networks connected to different routers, -master accesses slave by floating ip. The traffic goes from one network via +primary accesses minion by floating ip. The traffic goes from one network via external network to the other network. To use this scenario specify parameter ``--scenario openstack/perf_l3_north_south``. @@ -357,8 +358,8 @@ Scenario source is available at: https://opendev.org/performa/shaker/src/branch/ OpenStack L3 North-South UDP ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In this scenario Shaker launches pairs of instances on different compute nodes. -Instances are in different networks connected to different routers, master -accesses slave by floating ip. The traffic goes from one network via external +Instances are in different networks connected to different routers, primary +accesses minion by floating ip. The traffic goes from one network via external network to the other network. The load is generated by UDP traffic. To use this scenario specify parameter ``--scenario openstack/udp_l3_north_south``. @@ -498,7 +499,7 @@ Template source is available at: https://opendev.org/performa/shaker/src/branch/ openstack/cross_az/l3_east_west ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This Heat template creates a pair of networks plugged into the same router. -Master instances and slave instances are connected into different networks. +Primary instances and minion instances are connected into different networks. Template source is available at: https://opendev.org/performa/shaker/src/branch/master/shaker/scenarios/openstack/cross_az/l3_east_west.hot @@ -546,7 +547,7 @@ Template source is available at: https://opendev.org/performa/shaker/src/branch/ openstack/l3_east_west ^^^^^^^^^^^^^^^^^^^^^^ This Heat template creates a pair of networks plugged into the same router. -Master instances and slave instances are connected into different networks. +Primary instances and minion instances are connected into different networks. Template source is available at: https://opendev.org/performa/shaker/src/branch/master/shaker/scenarios/openstack/l3_east_west.hot diff --git a/doc/source/examples/full_l3_north_south.rst b/doc/source/examples/full_l3_north_south.rst index dcc51aa..ea5744c 100644 --- a/doc/source/examples/full_l3_north_south.rst +++ b/doc/source/examples/full_l3_north_south.rst @@ -2,7 +2,7 @@ L3 North-South -------------- This scenario tests the bandwidth between pairs of instances deployed in different virtual networks. Instances -with master agents are located in one network, instances with slave agents are reached via their floating IPs. +with primary agents are located in one network, instances with minion agents are reached via their floating IPs. Each instance is deployed on its own compute node. 
The test increases the load from 1 pair pair until all available instances are used. diff --git a/doc/source/images/accommodation_double_room.svg b/doc/source/images/accommodation_double_room.svg index 4af89c5..c57b95e 100644 --- a/doc/source/images/accommodation_double_room.svg +++ b/doc/source/images/accommodation_double_room.svg @@ -184,7 +184,7 @@ x="-855.45026" sodipodi:role="line" id="tspan4805" - style="text-align:center;text-anchor:middle;fill:#ffffff">master_1 + style="text-align:center;text-anchor:middle;fill:#ffffff">primary_1 slave_1 + y="135.65411">minion_1 master_2 + y="262.73535">primary_2 slave_2 + style="text-align:center;text-anchor:middle;fill:#ffffff">minion_2 master_N + style="text-align:center;text-anchor:middle;fill:#ffffff">primary_N slave_N + y="619.15131">minion_N diff --git a/doc/source/images/accommodation_single_room.svg b/doc/source/images/accommodation_single_room.svg index 659c1f3..d2e7e8d 100644 --- a/doc/source/images/accommodation_single_room.svg +++ b/doc/source/images/accommodation_single_room.svg @@ -184,7 +184,7 @@ x="-905.03375" sodipodi:role="line" id="tspan4805" - style="text-align:center;text-anchor:middle;fill:#ffffff">master_1 + style="text-align:center;text-anchor:middle;fill:#ffffff">primary_1 slave_1 + y="250.82941">minion_1 master_M + y="515.31848">primary_M slave_M + style="text-align:center;text-anchor:middle;fill:#ffffff">minion_M master_1 + style="text-align:center;text-anchor:middle;fill:#ffffff">primary_1 master_1 + sodipodi:role="line">primary_1 master_2 + y="629.99701">primary_2 master_1 + sodipodi:role="line">primary_1 slave_1 + y="630.14862">minion_1 master_1 + y="636.23785">primary_1 slave_2 + sodipodi:role="line">minion_2 master_1 + style="text-align:center;text-anchor:middle;fill:#ffffff">primary_1 master_2 + y="758.66028">primary_2 slave_1 + y="647.55859">minion_1 slave_2 + sodipodi:role="line">minion_2 +# The amount of time the socket will wait for a response from a sent message, +# in milliseconds. (integer value) +#agent_socket_recv_timeout = + +# The amount of time the socket will wait until a sent message is accepted, in +# milliseconds. (integer value) +#agent_socket_send_timeout = + # Prior to exiting, the number of reconnects the Agent will attempt with the # server upon socket operation errors. 
(integer value) #agent_socket_conn_retries = 10 diff --git a/shaker/engine/deploy.py b/shaker/engine/deploy.py index dcc70e9..4a34448 100755 --- a/shaker/engine/deploy.py +++ b/shaker/engine/deploy.py @@ -40,18 +40,18 @@ def prepare_for_cross_az(compute_nodes, zones): LOG.warn('cross_az is specified, but len(zones) is not 2') return compute_nodes - masters = [] - slaves = [] + primary_list = [] + minions = [] for node in compute_nodes: if node['zone'] == zones[0]: - masters.append(node) + primary_list.append(node) else: - slaves.append(node) + minions.append(node) res = [] - for i in range(min(len(masters), len(slaves))): - res.append(masters[i]) - res.append(slaves[i]) + for i in range(min(len(primary_list), len(minions))): + res.append(primary_list[i]) + res.append(minions[i]) return res @@ -103,28 +103,28 @@ def generate_agents(compute_nodes, accommodation, unique): for i in range(iterations): if 'pair' in accommodation: - master_id = '%s_master_%s' % (unique, i) - slave_id = '%s_slave_%s' % (unique, i) - master = dict(id=master_id, mode='master', slave_id=slave_id) - slave = dict(id=slave_id, mode='slave', master_id=master_id) + primary_id = '%s_primary_%s' % (unique, i) + minion_id = '%s_minion_%s' % (unique, i) + primary = dict(id=primary_id, mode='primary', minion_id=minion_id) + minion = dict(id=minion_id, mode='minion', primary_id=primary_id) if 'single_room' in accommodation: - master_formula = lambda x: i * 2 - slave_formula = lambda x: i * 2 + 1 + primary_formula = lambda x: i * 2 + minion_formula = lambda x: i * 2 + 1 elif 'double_room' in accommodation: - master_formula = lambda x: i - slave_formula = lambda x: i + primary_formula = lambda x: i + minion_formula = lambda x: i else: # mixed_room - master_formula = lambda x: i - slave_formula = lambda x: i + 1 + primary_formula = lambda x: i + minion_formula = lambda x: i + 1 - m = node_formula(master_formula(i)) - master['node'], master['zone'] = m['host'], m['zone'] - s = node_formula(slave_formula(i)) - slave['node'], slave['zone'] = s['host'], s['zone'] + m = node_formula(primary_formula(i)) + primary['node'], primary['zone'] = m['host'], m['zone'] + s = node_formula(minion_formula(i)) + minion['node'], minion['zone'] = s['host'], s['zone'] - agents[master['id']] = master - agents[slave['id']] = slave + agents[primary['id']] = primary + agents[minion['id']] = minion else: if 'single_room' in accommodation: agent_id = '%s_agent_%s' % (unique, i) @@ -174,7 +174,7 @@ def filter_agents(agents, stack_outputs, override=None): agent.update(stack_values) # workaround of Nova bug 1422686 - if agent.get('mode') == 'slave' and not agent.get('ip'): + if agent.get('mode') == 'minion' and not agent.get('ip'): LOG.info('IP address is missing in agent: %s', agent) continue @@ -184,10 +184,10 @@ def filter_agents(agents, stack_outputs, override=None): result = {} for agent in deployed_agents.values(): if (agent.get('mode') == 'alone' or - (agent.get('mode') == 'master' and - agent.get('slave_id') in deployed_agents) or - (agent.get('mode') == 'slave' and - agent.get('master_id') in deployed_agents)): + (agent.get('mode') == 'primary' and + agent.get('minion_id') in deployed_agents) or + (agent.get('mode') == 'minion' and + agent.get('primary_id') in deployed_agents)): result[agent['id']] = agent return result @@ -214,12 +214,12 @@ def distribute_agents(agents, get_host_fn): if buckets['alone']: result = dict((a['id'], a) for a in buckets['alone']) else: - for master, slave in zip(buckets['master'], buckets['slave']): - master['slave_id'] = 
slave['id'] - slave['master_id'] = master['id'] + for primary, minion in zip(buckets['primary'], buckets['minion']): + primary['minion_id'] = minion['id'] + minion['primary_id'] = primary['id'] - result[master['id']] = master - result[slave['id']] = slave + result[primary['id']] = primary + result[minion['id']] = minion return result diff --git a/shaker/engine/executors/flent.py b/shaker/engine/executors/flent.py index 37eb519..c559a18 100644 --- a/shaker/engine/executors/flent.py +++ b/shaker/engine/executors/flent.py @@ -27,7 +27,7 @@ class FlentExecutor(base.BaseExecutor): def get_command(self): cmd = base.CommandLine('flent') cmd.add('-H', self.test_definition.get('host') or - self.agent['slave']['ip']) + self.agent['minion']['ip']) cmd.add('-l', self.test_definition.get('time') or 60) cmd.add('-s', self.test_definition.get('interval') or 1) cmd.add(self.test_definition.get('method') or 'tcp_download') diff --git a/shaker/engine/executors/iperf.py b/shaker/engine/executors/iperf.py index ad4a968..702f455 100644 --- a/shaker/engine/executors/iperf.py +++ b/shaker/engine/executors/iperf.py @@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__) def add_common_iperf_params(cmd, executor): cmd.add('--client', executor.test_definition.get('host') or - executor.agent['slave']['ip']) + executor.agent['minion']['ip']) cmd.add('--format', 'm') if executor.test_definition.get('mss'): cmd.add('--mss', executor.test_definition.get('mss')) diff --git a/shaker/engine/executors/netperf.py b/shaker/engine/executors/netperf.py index 5941220..f5d315c 100644 --- a/shaker/engine/executors/netperf.py +++ b/shaker/engine/executors/netperf.py @@ -20,7 +20,7 @@ class NetperfExecutor(base.BaseExecutor): def get_command(self): cmd = base.CommandLine('netperf') cmd.add('-H', self.test_definition.get('host') or - self.agent['slave']['ip']) + self.agent['minion']['ip']) cmd.add('-l', self.get_expected_duration()) cmd.add('-t', self.test_definition.get('method') or 'TCP_STREAM') return cmd.make() diff --git a/shaker/engine/server.py b/shaker/engine/server.py index f2fc364..9662955 100644 --- a/shaker/engine/server.py +++ b/shaker/engine/server.py @@ -39,10 +39,11 @@ def _extend_agents(agents_map): extended_agents = {} for agent in agents_map.values(): extended = copy.deepcopy(agent) - if agent.get('slave_id'): - extended['slave'] = copy.deepcopy(agents_map[agent['slave_id']]) - if agent.get('master_id'): - extended['master'] = copy.deepcopy(agents_map[agent['master_id']]) + if agent.get('minion_id'): + extended['minion'] = copy.deepcopy(agents_map[agent['minion_id']]) + if agent.get('primary_id'): + extended['primary'] = copy.deepcopy( + agents_map[agent['primary_id']]) extended_agents[agent['id']] = extended return extended_agents @@ -51,13 +52,13 @@ def _make_test_title(test, params=None): s = test.get('title') or test.get('class') if params: s += ' '.join([','.join(['%s=%s' % (k, v) for k, v in params.items() - if k != 'host'])]) + if k != 'host'])]) return re.sub(r'[^\x20-\x7e\x80-\xff]+', '_', s) def _pick_agents(agents, progression): - # slave agents do not execute any tests - agents = [a for a in agents.values() if a.get('mode') != 'slave'] + # minion agents do not execute any tests + agents = [a for a in agents.values() if a.get('mode') != 'minion'] if not progression: yield agents diff --git a/shaker/resources/image_builder_templates/centos.yaml b/shaker/resources/image_builder_templates/centos.yaml index c0040ba..bf6fc45 100644 --- a/shaker/resources/image_builder_templates/centos.yaml +++ 
b/shaker/resources/image_builder_templates/centos.yaml @@ -40,7 +40,7 @@ resources: router_id: { get_resource: router } subnet_id: { get_resource: private_subnet } - master_image: + primary_image: type: OS::Glance::Image properties: container_format: bare @@ -50,21 +50,21 @@ resources: min_ram: 512 name: shaker_image_build_template - master_image_server_port: + primary_image_server_port: type: OS::Neutron::Port properties: network_id: { get_resource: private_net } fixed_ips: - subnet_id: { get_resource: private_subnet } - master_image_server: + primary_image_server: type: OS::Nova::Server properties: name: shaker_image_builder_server - image: { get_resource: master_image } + image: { get_resource: primary_image } flavor: { get_param: flavor } networks: - - port: { get_resource: master_image_server_port } + - port: { get_resource: primary_image_server_port } user_data_format: RAW user_data: | #!/bin/bash @@ -121,4 +121,4 @@ resources: outputs: server_info: - value: { get_attr: [ master_image_server, show ] } + value: { get_attr: [ primary_image_server, show ] } diff --git a/shaker/resources/image_builder_templates/debian.yaml b/shaker/resources/image_builder_templates/debian.yaml index 6aa3996..9292756 100644 --- a/shaker/resources/image_builder_templates/debian.yaml +++ b/shaker/resources/image_builder_templates/debian.yaml @@ -40,7 +40,7 @@ resources: router_id: { get_resource: router } subnet_id: { get_resource: private_subnet } - master_image: + primary_image: type: OS::Glance::Image properties: container_format: bare @@ -50,21 +50,21 @@ resources: min_ram: 512 name: shaker_image_build_template - master_image_server_port: + primary_image_server_port: type: OS::Neutron::Port properties: network_id: { get_resource: private_net } fixed_ips: - subnet_id: { get_resource: private_subnet } - master_image_server: + primary_image_server: type: OS::Nova::Server properties: name: shaker_image_builder_server - image: { get_resource: master_image } + image: { get_resource: primary_image } flavor: { get_param: flavor } networks: - - port: { get_resource: master_image_server_port } + - port: { get_resource: primary_image_server_port } user_data_format: RAW user_data: | #!/bin/bash @@ -79,4 +79,4 @@ resources: outputs: server_info: - value: { get_attr: [ master_image_server, show ] } + value: { get_attr: [ primary_image_server, show ] } diff --git a/shaker/resources/image_builder_templates/ubuntu.yaml b/shaker/resources/image_builder_templates/ubuntu.yaml index f13fcba..45ce1ac 100644 --- a/shaker/resources/image_builder_templates/ubuntu.yaml +++ b/shaker/resources/image_builder_templates/ubuntu.yaml @@ -39,7 +39,7 @@ resources: router_id: { get_resource: router } subnet_id: { get_resource: private_subnet } - master_image: + primary_image: type: OS::Glance::Image properties: container_format: bare @@ -49,21 +49,21 @@ resources: min_ram: 512 name: shaker_image_build_template - master_image_server_port: + primary_image_server_port: type: OS::Neutron::Port properties: network_id: { get_resource: private_net } fixed_ips: - subnet_id: { get_resource: private_subnet } - master_image_server: + primary_image_server: type: OS::Nova::Server properties: name: shaker_image_builder_server - image: { get_resource: master_image } + image: { get_resource: primary_image } flavor: { get_param: flavor } networks: - - port: { get_resource: master_image_server_port } + - port: { get_resource: primary_image_server_port } user_data_format: RAW user_data: | #!/bin/bash @@ -97,4 +97,4 @@ resources: outputs: server_info: - 
value: { get_attr: [ master_image_server, show ] } + value: { get_attr: [ primary_image_server, show ] } diff --git a/shaker/resources/report_templates/interactive.html b/shaker/resources/report_templates/interactive.html index 9382bc9..933fe77 100644 --- a/shaker/resources/report_templates/interactive.html +++ b/shaker/resources/report_templates/interactive.html @@ -303,14 +303,14 @@ var agents = getAgentsByIds(agent_ids); if (agents.length == 0) return; - // add slave agents - var slave_agent_ids = {}; + // add minion agents + var minion_agent_ids = {}; $.each(agents, function(_i, agent){ - if (agent.slave_id) { - slave_agent_ids[agent.slave_id] = true; + if (agent.minion_id) { + minion_agent_ids[agent.minion_id] = true; } }); - agents = $.merge(agents, getAgentsByIds(slave_agent_ids)); + agents = $.merge(agents, getAgentsByIds(minion_agent_ids)); contentArea.append(agents_table_template({ agents: agents, diff --git a/shaker/scenarios/openstack/cross_az/full_l2.yaml b/shaker/scenarios/openstack/cross_az/full_l2.yaml index 49e25e3..1f25b9c 100644 --- a/shaker/scenarios/openstack/cross_az/full_l2.yaml +++ b/shaker/scenarios/openstack/cross_az/full_l2.yaml @@ -3,7 +3,7 @@ title: OpenStack L2 Cross-AZ description: In this scenario Shaker launches pairs of instances in the same tenant network. Every instance is hosted on a separate compute node, all available - compute nodes are utilized. The master and slave instances are in different + compute nodes are utilized. The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. The traffic goes within the tenant network (L2 domain). diff --git a/shaker/scenarios/openstack/cross_az/full_l3_east_west.yaml b/shaker/scenarios/openstack/cross_az/full_l3_east_west.yaml index 227787b..6f0fdfb 100644 --- a/shaker/scenarios/openstack/cross_az/full_l3_east_west.yaml +++ b/shaker/scenarios/openstack/cross_az/full_l3_east_west.yaml @@ -5,7 +5,7 @@ description: compute node. All available compute nodes are utilized. Instances are connected to one of 2 tenant networks, which plugged into single router. The traffic goes from one network to the other (L3 east-west). - The master and slave instances are in different availability zones. + The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. deployment: diff --git a/shaker/scenarios/openstack/cross_az/full_l3_north_south.yaml b/shaker/scenarios/openstack/cross_az/full_l3_north_south.yaml index fdcb11f..d0d22ee 100644 --- a/shaker/scenarios/openstack/cross_az/full_l3_north_south.yaml +++ b/shaker/scenarios/openstack/cross_az/full_l3_north_south.yaml @@ -3,9 +3,9 @@ title: OpenStack L3 North-South Cross-AZ description: In this scenario Shaker launches pairs of instances on different compute nodes. All available compute nodes are utilized. Instances are in different - networks connected to different routers, master accesses slave by + networks connected to different routers, primary accesses minion by floating ip. The traffic goes from one network via external network to the - other network. The master and slave instances are in different availability + other network. The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. 
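
Reviewer aid (not part of the patch): a minimal runnable sketch of the agent records this rename produces, using the field names visible in deploy.generate_agents() and server._pick_agents() earlier in this diff (mode, primary_id, minion_id). The unique prefix, node names, and zone below are illustrative values only.

    # Sketch of one primary/minion agent pair, mirroring generate_agents()
    # after the rename. Only the key names are taken from the patch; the
    # concrete ids, nodes and zone are hypothetical examples.
    def make_pair(unique, index, primary_node, minion_node, zone='nova'):
        primary_id = '%s_primary_%s' % (unique, index)
        minion_id = '%s_minion_%s' % (unique, index)
        primary = dict(id=primary_id, mode='primary', minion_id=minion_id,
                       node=primary_node, zone=zone)
        minion = dict(id=minion_id, mode='minion', primary_id=primary_id,
                      node=minion_node, zone=zone)
        return {primary_id: primary, minion_id: minion}

    agents = make_pair('UU1D', 0, primary_node='uno', minion_node='dos')
    # Minion agents never execute tests themselves; consumers filter them
    # out the same way the renamed _pick_agents() does.
    executing = [a for a in agents.values() if a.get('mode') != 'minion']
    print(sorted(a['id'] for a in executing))  # ['UU1D_primary_0']
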
diff --git a/shaker/scenarios/openstack/cross_az/l3_east_west.hot b/shaker/scenarios/openstack/cross_az/l3_east_west.hot index 2bcfede..0a12c54 100644 --- a/shaker/scenarios/openstack/cross_az/l3_east_west.hot +++ b/shaker/scenarios/openstack/cross_az/l3_east_west.hot @@ -2,7 +2,7 @@ heat_template_version: 2013-05-23 description: This Heat template creates a pair of networks plugged into the same router. - Master instances and slave instances are connected into different networks. + Primary instances and minion instances are connected into different networks. parameters: image: @@ -100,7 +100,7 @@ resources: "$SERVER_ENDPOINT": { get_param: server_endpoint } "$AGENT_ID": {{ agent.id }} -{% if agent.mode == 'master' %} +{% if agent.mode == 'primary' %} {{ agent.id }}_port: type: OS::Neutron::Port properties: @@ -124,7 +124,7 @@ outputs: {% for agent in agents.values() %} {{ agent.id }}_instance_name: value: { get_attr: [ {{ agent.id }}, instance_name ] } -{% if agent.mode == 'master' %} +{% if agent.mode == 'primary' %} {{ agent.id }}_ip: value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [east_private_net, name] }, 0 ] } {% else %} diff --git a/shaker/scenarios/openstack/cross_az/l3_north_south.hot b/shaker/scenarios/openstack/cross_az/l3_north_south.hot index 76ffa6c..144c97a 100644 --- a/shaker/scenarios/openstack/cross_az/l3_north_south.hot +++ b/shaker/scenarios/openstack/cross_az/l3_north_south.hot @@ -107,7 +107,7 @@ resources: "$SERVER_ENDPOINT": { get_param: server_endpoint } "$AGENT_ID": {{ agent.id }} -{% if agent.mode == 'master' %} +{% if agent.mode == 'primary' %} {{ agent.id }}_port: type: OS::Neutron::Port properties: @@ -138,7 +138,7 @@ outputs: {% for agent in agents.values() %} {{ agent.id }}_instance_name: value: { get_attr: [ {{ agent.id }}, instance_name ] } -{% if agent.mode == 'master' %} +{% if agent.mode == 'primary' %} {{ agent.id }}_ip: value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [north_private_net, name] }, 0 ] } {% else %} diff --git a/shaker/scenarios/openstack/cross_az/perf_l2.yaml b/shaker/scenarios/openstack/cross_az/perf_l2.yaml index 6c53394..ed84b1f 100644 --- a/shaker/scenarios/openstack/cross_az/perf_l2.yaml +++ b/shaker/scenarios/openstack/cross_az/perf_l2.yaml @@ -3,7 +3,7 @@ title: OpenStack L2 Cross-AZ Performance description: In this scenario Shaker launches 1 pair of instances in the same tenant network. Each instance is hosted on a separate compute node. - The master and slave instances are in different availability zones. + The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. deployment: diff --git a/shaker/scenarios/openstack/cross_az/perf_l3_east_west.yaml b/shaker/scenarios/openstack/cross_az/perf_l3_east_west.yaml index 606b172..fcc68a2 100644 --- a/shaker/scenarios/openstack/cross_az/perf_l3_east_west.yaml +++ b/shaker/scenarios/openstack/cross_az/perf_l3_east_west.yaml @@ -5,7 +5,7 @@ description: compute node. Instances are connected to one of 2 tenant networks, which plugged into single router. The traffic goes from one network to the other (L3 east-west). - The master and slave instances are in different availability zones. + The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. 
deployment: diff --git a/shaker/scenarios/openstack/cross_az/perf_l3_north_south.yaml b/shaker/scenarios/openstack/cross_az/perf_l3_north_south.yaml index 5f9a1fd..2b432d4 100644 --- a/shaker/scenarios/openstack/cross_az/perf_l3_north_south.yaml +++ b/shaker/scenarios/openstack/cross_az/perf_l3_north_south.yaml @@ -3,9 +3,9 @@ title: OpenStack L3 North-South Cross-AZ Performance description: In this scenario Shaker launches 1 pair of instances on different compute nodes. Instances are in different networks connected to different routers, - master accesses slave by floating ip. The traffic goes from one network + primary accesses minion by floating ip. The traffic goes from one network via external network to the other network. - The master and slave instances are in different availability zones. + The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. deployment: diff --git a/shaker/scenarios/openstack/cross_az/udp_l2.yaml b/shaker/scenarios/openstack/cross_az/udp_l2.yaml index 058db75..af2f229 100644 --- a/shaker/scenarios/openstack/cross_az/udp_l2.yaml +++ b/shaker/scenarios/openstack/cross_az/udp_l2.yaml @@ -4,7 +4,7 @@ description: In this scenario Shaker launches pairs of instances in the same tenant network. Every instance is hosted on a separate compute node. The load is generated by UDP traffic. - The master and slave instances are in different availability zones. + The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. deployment: diff --git a/shaker/scenarios/openstack/cross_az/udp_l2_mss8950.yaml b/shaker/scenarios/openstack/cross_az/udp_l2_mss8950.yaml index 9866e94..6e92db2 100644 --- a/shaker/scenarios/openstack/cross_az/udp_l2_mss8950.yaml +++ b/shaker/scenarios/openstack/cross_az/udp_l2_mss8950.yaml @@ -4,7 +4,7 @@ description: In this scenario Shaker launches pairs of instances in the same tenant network. Every instance is hosted on a separate compute node. The load is generated by UDP traffic and jumbo packets. - The master and slave instances are in different availability zones. + The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. deployment: diff --git a/shaker/scenarios/openstack/cross_az/udp_l3_east_west.yaml b/shaker/scenarios/openstack/cross_az/udp_l3_east_west.yaml index 647abbc..4a578fd 100644 --- a/shaker/scenarios/openstack/cross_az/udp_l3_east_west.yaml +++ b/shaker/scenarios/openstack/cross_az/udp_l3_east_west.yaml @@ -5,7 +5,7 @@ description: compute node. Instances are connected to one of 2 tenant networks, which plugged into single router. The traffic goes from one network to the other (L3 east-west). The load is generated by UDP traffic. - The master and slave instances are in different availability zones. + The primary and minion instances are in different availability zones. The scenario is used to test throughput between `nova` and `vcenter` zones. deployment: diff --git a/shaker/scenarios/openstack/full_l3_north_south.yaml b/shaker/scenarios/openstack/full_l3_north_south.yaml index ee20773..65f56a6 100644 --- a/shaker/scenarios/openstack/full_l3_north_south.yaml +++ b/shaker/scenarios/openstack/full_l3_north_south.yaml @@ -3,7 +3,7 @@ title: OpenStack L3 North-South description: In this scenario Shaker launches pairs of instances on different compute nodes. 
All available compute nodes are utilized. Instances are in different - networks connected to different routers, master accesses slave by + networks connected to different routers, primary accesses minion by floating ip. The traffic goes from one network via external network to the other network. diff --git a/shaker/scenarios/openstack/l3_east_west.hot b/shaker/scenarios/openstack/l3_east_west.hot index 5b90b2d..76b9b0e 100644 --- a/shaker/scenarios/openstack/l3_east_west.hot +++ b/shaker/scenarios/openstack/l3_east_west.hot @@ -2,7 +2,7 @@ heat_template_version: 2013-05-23 description: This Heat template creates a pair of networks plugged into the same router. - Master instances and slave instances are connected into different networks. + Primary instances and minion instances are connected into different networks. parameters: image: @@ -100,7 +100,7 @@ resources: "$SERVER_ENDPOINT": { get_param: server_endpoint } "$AGENT_ID": {{ agent.id }} -{% if agent.mode == 'master' %} +{% if agent.mode == 'primary' %} {{ agent.id }}_port: type: OS::Neutron::Port properties: @@ -124,7 +124,7 @@ outputs: {% for agent in agents.values() %} {{ agent.id }}_instance_name: value: { get_attr: [ {{ agent.id }}, instance_name ] } -{% if agent.mode == 'master' %} +{% if agent.mode == 'primary' %} {{ agent.id }}_ip: value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [east_private_net, name] }, 0 ] } {% else %} diff --git a/shaker/scenarios/openstack/l3_north_south.hot b/shaker/scenarios/openstack/l3_north_south.hot index 06b4b56..272db94 100644 --- a/shaker/scenarios/openstack/l3_north_south.hot +++ b/shaker/scenarios/openstack/l3_north_south.hot @@ -107,7 +107,7 @@ resources: "$SERVER_ENDPOINT": { get_param: server_endpoint } "$AGENT_ID": {{ agent.id }} -{% if agent.mode == 'master' %} +{% if agent.mode == 'primary' %} {{ agent.id }}_port: type: OS::Neutron::Port properties: @@ -138,7 +138,7 @@ outputs: {% for agent in agents.values() %} {{ agent.id }}_instance_name: value: { get_attr: [ {{ agent.id }}, instance_name ] } -{% if agent.mode == 'master' %} +{% if agent.mode == 'primary' %} {{ agent.id }}_ip: value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [north_private_net, name] }, 0 ] } {% else %} diff --git a/shaker/scenarios/openstack/perf_l3_north_south.yaml b/shaker/scenarios/openstack/perf_l3_north_south.yaml index 34a521a..87b9017 100644 --- a/shaker/scenarios/openstack/perf_l3_north_south.yaml +++ b/shaker/scenarios/openstack/perf_l3_north_south.yaml @@ -3,7 +3,7 @@ title: OpenStack L3 North-South Performance description: In this scenario Shaker launches 1 pair of instances on different compute nodes. Instances are in different networks connected to different routers, - master accesses slave by floating ip. The traffic goes from one network + primary accesses minion by floating ip. The traffic goes from one network via external network to the other network. deployment: diff --git a/shaker/scenarios/openstack/udp_l3_north_south.yaml b/shaker/scenarios/openstack/udp_l3_north_south.yaml index 1938645..dc8d1f8 100644 --- a/shaker/scenarios/openstack/udp_l3_north_south.yaml +++ b/shaker/scenarios/openstack/udp_l3_north_south.yaml @@ -3,7 +3,7 @@ title: OpenStack L3 North-South UDP description: In this scenario Shaker launches pairs of instances on different compute nodes. Instances are in different networks connected to different routers, - master accesses slave by floating ip. The traffic goes from one network + primary accesses minion by floating ip. 
The traffic goes from one network via external network to the other network. The load is generated by UDP traffic. diff --git a/shaker/scenarios/test/static_agents_pair.yaml b/shaker/scenarios/test/static_agents_pair.yaml index 82094d3..2cb3caf 100644 --- a/shaker/scenarios/test/static_agents_pair.yaml +++ b/shaker/scenarios/test/static_agents_pair.yaml @@ -9,13 +9,13 @@ deployment: - id: a-001 ip: 10.20.1.2 - mode: master - slave_id: a-002 + mode: primary + minion_id: a-002 - id: a-002 ip: 10.20.1.8 - mode: slave - master_id: a-001 + mode: minion + primary_id: a-001 execution: tests: diff --git a/shaker/tests/test_deploy.py b/shaker/tests/test_deploy.py index 5850072..2e28cdf 100644 --- a/shaker/tests/test_deploy.py +++ b/shaker/tests/test_deploy.py @@ -93,17 +93,17 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_pair_single_room(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'node': 'uno', 'zone': ZONE, - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:dos' % ZONE, 'zone': ZONE, 'node': 'dos'}, @@ -117,17 +117,17 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_pair_single_room_az_host(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'node': 'uno', 'zone': ZONE, - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:dos' % ZONE, 'zone': ZONE, 'node': 'dos'}, @@ -149,17 +149,17 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_pair_single_room_best_effort(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'node': 'uno', 'zone': ZONE, - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:uno' % ZONE, 'zone': ZONE, 'node': 'uno'}, @@ -174,17 +174,17 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_pair_single_room_best_effort_three_nodes(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'node': 'uno', 'zone': ZONE, - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:dos' % ZONE, 'zone': ZONE, 'node': 'dos'}, @@ -206,17 +206,17 @@ class TestDeploy(testtools.TestCase): def 
test_generate_agents_pair_single_room_compute_nodes_best_effort(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'node': 'uno', 'zone': ZONE, - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:uno' % ZONE, 'zone': ZONE, 'node': 'uno'}, @@ -231,45 +231,45 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_pair_double_room(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'node': 'uno', 'zone': ZONE, - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:uno' % ZONE, 'zone': ZONE, 'node': 'uno'}, - 'UU1D_master_1': { - 'id': 'UU1D_master_1', - 'mode': 'master', + 'UU1D_primary_1': { + 'id': 'UU1D_primary_1', + 'mode': 'primary', 'availability_zone': '%s:dos' % ZONE, 'node': 'dos', 'zone': ZONE, - 'slave_id': 'UU1D_slave_1'}, - 'UU1D_slave_1': { - 'id': 'UU1D_slave_1', - 'master_id': 'UU1D_master_1', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_1'}, + 'UU1D_minion_1': { + 'id': 'UU1D_minion_1', + 'primary_id': 'UU1D_primary_1', + 'mode': 'minion', 'availability_zone': '%s:dos' % ZONE, 'zone': ZONE, 'node': 'dos'}, - 'UU1D_master_2': { - 'id': 'UU1D_master_2', - 'mode': 'master', + 'UU1D_primary_2': { + 'id': 'UU1D_primary_2', + 'mode': 'primary', 'availability_zone': '%s:tre' % ZONE, 'node': 'tre', 'zone': ZONE, - 'slave_id': 'UU1D_slave_2'}, - 'UU1D_slave_2': { - 'id': 'UU1D_slave_2', - 'master_id': 'UU1D_master_2', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_2'}, + 'UU1D_minion_2': { + 'id': 'UU1D_minion_2', + 'primary_id': 'UU1D_primary_2', + 'mode': 'minion', 'availability_zone': '%s:tre' % ZONE, 'zone': ZONE, 'node': 'tre'}, @@ -283,31 +283,31 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_pair_double_room_az_host(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'node': 'uno', 'zone': ZONE, - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:uno' % ZONE, 'zone': ZONE, 'node': 'uno'}, - 'UU1D_master_1': { - 'id': 'UU1D_master_1', - 'mode': 'master', + 'UU1D_primary_1': { + 'id': 'UU1D_primary_1', + 'mode': 'primary', 'availability_zone': '%s:dos' % ZONE, 'node': 'dos', 'zone': ZONE, - 'slave_id': 'UU1D_slave_1'}, - 'UU1D_slave_1': { - 'id': 'UU1D_slave_1', - 'master_id': 'UU1D_master_1', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_1'}, + 'UU1D_minion_1': { + 'id': 'UU1D_minion_1', + 'primary_id': 'UU1D_primary_1', + 'mode': 'minion', 'availability_zone': '%s:dos' % ZONE, 'zone': ZONE, 'node': 
'dos'}, @@ -323,31 +323,31 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_pair_mixed_room(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'zone': ZONE, 'node': 'uno', - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:dos' % ZONE, 'zone': ZONE, 'node': 'dos'}, - 'UU1D_master_1': { - 'id': 'UU1D_master_1', - 'mode': 'master', + 'UU1D_primary_1': { + 'id': 'UU1D_primary_1', + 'mode': 'primary', 'availability_zone': '%s:dos' % ZONE, 'zone': ZONE, 'node': 'dos', - 'slave_id': 'UU1D_slave_1'}, - 'UU1D_slave_1': { - 'id': 'UU1D_slave_1', - 'master_id': 'UU1D_master_1', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_1'}, + 'UU1D_minion_1': { + 'id': 'UU1D_minion_1', + 'primary_id': 'UU1D_primary_1', + 'mode': 'minion', 'availability_zone': '%s:uno' % ZONE, 'zone': ZONE, 'node': 'uno'}, @@ -483,17 +483,17 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_zones_specified(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'slave_id': 'UU1D_slave_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'minion_id': 'UU1D_minion_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'zone': ZONE, 'node': 'uno'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:tre' % ZONE, 'zone': ZONE, 'node': 'tre'}, @@ -515,17 +515,17 @@ class TestDeploy(testtools.TestCase): mr.side_effect = lambda x, n: x[:n] unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'slave_id': 'UU1D_slave_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'minion_id': 'UU1D_minion_0', + 'mode': 'primary', 'availability_zone': '%s:uno' % ZONE, 'zone': ZONE, 'node': 'uno'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': '%s:tre' % ZONE, 'zone': ZONE, 'node': 'tre'}, @@ -547,31 +547,31 @@ class TestDeploy(testtools.TestCase): def test_generate_agents_cross_zones(self): unique = 'UU1D' expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'slave_id': 'UU1D_slave_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'minion_id': 'UU1D_minion_0', + 'mode': 'primary', 'availability_zone': 'nova:uno', 'zone': 'nova', 'node': 'uno'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'availability_zone': 'vcenter:tre', 'zone': 'vcenter', 'node': 'tre'}, - 'UU1D_master_1': { - 'id': 'UU1D_master_1', - 'slave_id': 'UU1D_slave_1', - 'mode': 'master', + 'UU1D_primary_1': { + 'id': 'UU1D_primary_1', + 'minion_id': 'UU1D_minion_1', + 'mode': 'primary', 'availability_zone': 'nova:duo', 'zone': 'nova', 'node': 'duo'}, - 'UU1D_slave_1': { - 'id': 'UU1D_slave_1', - 'master_id': 'UU1D_master_1', - 'mode': 'slave', + 
'UU1D_minion_1': { + 'id': 'UU1D_minion_1', + 'primary_id': 'UU1D_primary_1', + 'mode': 'minion', 'availability_zone': 'vcenter:cinco', 'zone': 'vcenter', 'node': 'cinco'}, @@ -630,87 +630,87 @@ class TestDeploy(testtools.TestCase): def test_filter_agents_pair_single_room(self): agents = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'node': 'uno', - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'node': 'dos'}, } stack_outputs = { - 'UU1D_master_0_ip': '10.0.0.1', - 'UU1D_master_0_instance_name': 'i-000001', - 'UU1D_slave_0_ip': '10.0.0.2', - 'UU1D_slave_0_instance_name': 'i-000002', + 'UU1D_primary_0_ip': '10.0.0.1', + 'UU1D_primary_0_instance_name': 'i-000001', + 'UU1D_minion_0_ip': '10.0.0.2', + 'UU1D_minion_0_instance_name': 'i-000002', } - expected = {'UU1D_master_0': agents['UU1D_master_0'], - 'UU1D_slave_0': agents['UU1D_slave_0']} + expected = {'UU1D_primary_0': agents['UU1D_primary_0'], + 'UU1D_minion_0': agents['UU1D_minion_0']} filtered = deploy.filter_agents(agents, stack_outputs) self.assertEqual(expected, filtered) def test_filter_agents_pair_double_room_partially_deployed(self): agents = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'node': 'uno', - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'node': 'uno'}, - 'UU1D_master_1': { - 'id': 'UU1D_master_1', - 'mode': 'master', + 'UU1D_primary_1': { + 'id': 'UU1D_primary_1', + 'mode': 'primary', 'node': 'dos', - 'slave_id': 'UU1D_slave_1'}, - 'UU1D_slave_1': { - 'id': 'UU1D_slave_1', - 'master_id': 'UU1D_master_1', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_1'}, + 'UU1D_minion_1': { + 'id': 'UU1D_minion_1', + 'primary_id': 'UU1D_primary_1', + 'mode': 'minion', 'node': 'dos'}, } stack_outputs = { - 'UU1D_master_0_ip': '10.0.0.1', - 'UU1D_master_0_instance_name': 'i-000001', - 'UU1D_slave_0_ip': '10.0.0.2', - 'UU1D_slave_0_instance_name': 'i-000002', - 'UU1D_master_1_ip': '10.0.0.3', - 'UU1D_master_1_instance_name': 'i-000003', - 'UU1D_slave_1_instance_name': 'i-000004', + 'UU1D_primary_0_ip': '10.0.0.1', + 'UU1D_primary_0_instance_name': 'i-000001', + 'UU1D_minion_0_ip': '10.0.0.2', + 'UU1D_minion_0_instance_name': 'i-000002', + 'UU1D_primary_1_ip': '10.0.0.3', + 'UU1D_primary_1_instance_name': 'i-000003', + 'UU1D_minion_1_instance_name': 'i-000004', } - expected = {'UU1D_master_0': agents['UU1D_master_0'], - 'UU1D_slave_0': agents['UU1D_slave_0'], } + expected = {'UU1D_primary_0': agents['UU1D_primary_0'], + 'UU1D_minion_0': agents['UU1D_minion_0'], } filtered = deploy.filter_agents(agents, stack_outputs) self.assertEqual(expected, filtered) def test_filter_agents_pair_single_room_with_overrides(self): agents = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'node': 'uno', - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 
'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'node': 'dos'}, } ips = { - 'UU1D_master_0': '10.0.0.2', - 'UU1D_slave_0': '10.0.0.4', + 'UU1D_primary_0': '10.0.0.2', + 'UU1D_minion_0': '10.0.0.4', } stack_outputs = {} - expected = {'UU1D_master_0': agents['UU1D_master_0'], - 'UU1D_slave_0': agents['UU1D_slave_0']} + expected = {'UU1D_primary_0': agents['UU1D_primary_0'], + 'UU1D_minion_0': agents['UU1D_minion_0']} def override(agent): return dict(ip=ips[agent['id']]) @@ -718,7 +718,8 @@ class TestDeploy(testtools.TestCase): filtered = deploy.filter_agents(agents, stack_outputs, override=override) self.assertEqual(expected, filtered) - self.assertEqual(filtered['UU1D_master_0']['ip'], ips['UU1D_master_0']) + self.assertEqual(filtered['UU1D_primary_0'] + ['ip'], ips['UU1D_primary_0']) def test_prepare_for_cross_az(self): source = [ @@ -797,28 +798,30 @@ class TestDeploy(testtools.TestCase): create_stack_mock.return_value = uuid.uuid4() heat_outputs = { - stack_name + '_master_0_instance_name': 'instance-0000052f', - stack_name + '_master_0_ip': '192.0.0.3', - stack_name + '_slave_0_ip': '192.0.0.4', - stack_name + '_slave_0_instance_name': 'instance-0000052c'} + stack_name + '_primary_0_instance_name': 'instance-0000052f', + stack_name + '_primary_0_ip': '192.0.0.3', + stack_name + '_minion_0_ip': '192.0.0.4', + stack_name + '_minion_0_instance_name': 'instance-0000052c'} stack_output_mock.return_value = heat_outputs expected = { - 'shaker_abcdefg_master_0': {'availability_zone': 'nova:host-1', - 'id': 'shaker_abcdefg_master_0', - 'ip': cidr + '.3', - 'mode': 'master', + 'shaker_abcdefg_primary_0': {'availability_zone': 'nova:host-1', + 'id': 'shaker_abcdefg_primary_0', + 'ip': cidr + '.3', + 'mode': 'primary', + 'node': 'host-1', + 'minion_id': + 'shaker_abcdefg_minion_0', + 'zone': 'nova'}, + 'shaker_abcdefg_minion_0': {'availability_zone': 'nova:host-1', + 'id': 'shaker_abcdefg_minion_0', + 'ip': cidr + '.4', + 'primary_id': + 'shaker_abcdefg_primary_0', + 'mode': 'minion', 'node': 'host-1', - 'slave_id': 'shaker_abcdefg_slave_0', - 'zone': 'nova'}, - 'shaker_abcdefg_slave_0': {'availability_zone': 'nova:host-1', - 'id': 'shaker_abcdefg_slave_0', - 'ip': cidr + '.4', - 'master_id': 'shaker_abcdefg_master_0', - 'mode': 'slave', - 'node': 'host-1', - 'zone': 'nova'}} + 'zone': 'nova'}} agents = deployment._deploy_from_hot(scenario['deployment'], server_endpoint, @@ -856,28 +859,30 @@ class TestDeploy(testtools.TestCase): create_stack_mock.return_value = uuid.uuid4() heat_outputs = { - stack_name + '_master_0_instance_name': 'instance-0000052f', - stack_name + '_master_0_ip': '10.0.0.3', - stack_name + '_slave_0_ip': '10.0.0.4', - stack_name + '_slave_0_instance_name': 'instance-0000052c'} + stack_name + '_primary_0_instance_name': 'instance-0000052f', + stack_name + '_primary_0_ip': '10.0.0.3', + stack_name + '_minion_0_ip': '10.0.0.4', + stack_name + '_minion_0_instance_name': 'instance-0000052c'} stack_output_mock.return_value = heat_outputs expected = { - 'shaker_abcdefg_master_0': {'availability_zone': 'nova:host-1', - 'id': 'shaker_abcdefg_master_0', - 'ip': '10.0.0.3', - 'mode': 'master', + 'shaker_abcdefg_primary_0': {'availability_zone': 'nova:host-1', + 'id': 'shaker_abcdefg_primary_0', + 'ip': '10.0.0.3', + 'mode': 'primary', + 'node': 'host-1', + 'minion_id': + 'shaker_abcdefg_minion_0', + 'zone': 'nova'}, + 'shaker_abcdefg_minion_0': {'availability_zone': 'nova:host-1', + 'id': 'shaker_abcdefg_minion_0', + 'ip': '10.0.0.4', + 
'primary_id': + 'shaker_abcdefg_primary_0', + 'mode': 'minion', 'node': 'host-1', - 'slave_id': 'shaker_abcdefg_slave_0', - 'zone': 'nova'}, - 'shaker_abcdefg_slave_0': {'availability_zone': 'nova:host-1', - 'id': 'shaker_abcdefg_slave_0', - 'ip': '10.0.0.4', - 'master_id': 'shaker_abcdefg_master_0', - 'mode': 'slave', - 'node': 'host-1', - 'zone': 'nova'}} + 'zone': 'nova'}} agents = deployment._deploy_from_hot(scenario['deployment'], server_endpoint, @@ -1174,32 +1179,32 @@ class TestDeploy(testtools.TestCase): def test_distribute_agents(self): agents = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'ip': '10.0.0.3', - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', 'ip': '10.0.0.4', - 'mode': 'slave'}, - 'UU1D_master_1': { - 'id': 'UU1D_master_1', - 'mode': 'master', + 'mode': 'minion'}, + 'UU1D_primary_1': { + 'id': 'UU1D_primary_1', + 'mode': 'primary', 'ip': '10.0.0.5', - 'slave_id': 'UU1D_slave_1'}, - 'UU1D_slave_1': { - 'id': 'UU1D_slave_1', - 'master_id': 'UU1D_master_1', + 'minion_id': 'UU1D_minion_1'}, + 'UU1D_minion_1': { + 'id': 'UU1D_minion_1', + 'primary_id': 'UU1D_primary_1', 'ip': '10.0.0.6', - 'mode': 'slave'}, + 'mode': 'minion'}, } hosts = { - 'UU1D_master_0': '001', - 'UU1D_slave_0': '002', - 'UU1D_master_1': '003', - 'UU1D_slave_1': '004', + 'UU1D_primary_0': '001', + 'UU1D_minion_0': '002', + 'UU1D_primary_1': '003', + 'UU1D_minion_1': '004', } expected = copy.deepcopy(agents) @@ -1214,47 +1219,47 @@ class TestDeploy(testtools.TestCase): # todo refactor code to use lists instead of dicts def _test_distribute_agents_collision(self): agents = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'ip': '10.0.0.3', - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', 'ip': '10.0.0.4', - 'mode': 'slave'}, - 'UU1D_master_1': { - 'id': 'UU1D_master_1', - 'mode': 'master', + 'mode': 'minion'}, + 'UU1D_primary_1': { + 'id': 'UU1D_primary_1', + 'mode': 'primary', 'ip': '10.0.0.5', - 'slave_id': 'UU1D_slave_1'}, - 'UU1D_slave_1': { - 'id': 'UU1D_slave_1', - 'master_id': 'UU1D_master_1', + 'minion_id': 'UU1D_minion_1'}, + 'UU1D_minion_1': { + 'id': 'UU1D_minion_1', + 'primary_id': 'UU1D_primary_1', 'ip': '10.0.0.6', - 'mode': 'slave'}, + 'mode': 'minion'}, } hosts = { - 'UU1D_master_0': '001', - 'UU1D_slave_0': '001', # collides with master_0 - 'UU1D_master_1': '003', - 'UU1D_slave_1': '004', + 'UU1D_primary_0': '001', + 'UU1D_minion_0': '001', # collides with primary_0 + 'UU1D_primary_1': '003', + 'UU1D_minion_1': '004', } expected = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'ip': '10.0.0.3', 'node': '001', - 'slave_id': 'UU1D_slave_1'}, - 'UU1D_slave_1': { - 'id': 'UU1D_slave_1', - 'master_id': 'UU1D_master_0', + 'minion_id': 'UU1D_minion_1'}, + 'UU1D_minion_1': { + 'id': 'UU1D_minion_1', + 'primary_id': 'UU1D_primary_0', 'ip': '10.0.0.6', 'node': '004', - 'mode': 'slave'}, + 'mode': 'minion'}, } observed = deploy.distribute_agents(agents, lambda x: hosts[x]) diff --git 
a/shaker/tests/test_flent_executor.py b/shaker/tests/test_flent_executor.py index 9cbea9a..7f096bc 100644 --- a/shaker/tests/test_flent_executor.py +++ b/shaker/tests/test_flent_executor.py @@ -19,7 +19,7 @@ from shaker.engine.executors import flent IP = '10.0.0.10' -AGENT = {'slave': {'ip': IP}} +AGENT = {'minion': {'ip': IP}} class TestFlentExecutor(testtools.TestCase): diff --git a/shaker/tests/test_iperf_executor.py b/shaker/tests/test_iperf_executor.py index 0e9ae95..84642bb 100644 --- a/shaker/tests/test_iperf_executor.py +++ b/shaker/tests/test_iperf_executor.py @@ -20,7 +20,7 @@ from shaker.engine.executors import iperf IP = '10.0.0.10' -AGENT = {'slave': {'ip': IP}} +AGENT = {'minion': {'ip': IP}} class TestIperfGraphExecutor(testtools.TestCase): diff --git a/shaker/tests/test_netperf_executor.py b/shaker/tests/test_netperf_executor.py index bb98cbf..b7c351c 100644 --- a/shaker/tests/test_netperf_executor.py +++ b/shaker/tests/test_netperf_executor.py @@ -19,7 +19,7 @@ from shaker.engine.executors import netperf IP = '10.0.0.10' -AGENT = {'slave': {'ip': IP}} +AGENT = {'minion': {'ip': IP}} class TestNetperfExecutor(testtools.TestCase): diff --git a/shaker/tests/test_server.py b/shaker/tests/test_server.py index ea42886..adee252 100644 --- a/shaker/tests/test_server.py +++ b/shaker/tests/test_server.py @@ -27,22 +27,22 @@ class TestServer(testtools.TestCase): def test_extend_agents(self): agents_map = { - 'UU1D_master_0': { - 'id': 'UU1D_master_0', - 'mode': 'master', + 'UU1D_primary_0': { + 'id': 'UU1D_primary_0', + 'mode': 'primary', 'node': 'uno', - 'slave_id': 'UU1D_slave_0'}, - 'UU1D_slave_0': { - 'id': 'UU1D_slave_0', - 'master_id': 'UU1D_master_0', - 'mode': 'slave', + 'minion_id': 'UU1D_minion_0'}, + 'UU1D_minion_0': { + 'id': 'UU1D_minion_0', + 'primary_id': 'UU1D_primary_0', + 'mode': 'minion', 'node': 'dos'}, } agents = server._extend_agents(agents_map) - self.assertDictContainsSubset(agents['UU1D_master_0']['slave'], - agents['UU1D_slave_0']) - self.assertDictContainsSubset(agents['UU1D_slave_0']['master'], - agents['UU1D_master_0']) + self.assertDictContainsSubset(agents['UU1D_primary_0']['minion'], + agents['UU1D_minion_0']) + self.assertDictContainsSubset(agents['UU1D_minion_0']['primary'], + agents['UU1D_primary_0']) def test_pick_agents_full(self): agents = {} @@ -54,18 +54,18 @@ class TestServer(testtools.TestCase): for arr in server._pick_agents(agents, None)] self.assertEqual([set(range(10))], picked) - def test_pick_agents_full_filter_slaves(self): + def test_pick_agents_full_filter_minions(self): agents = {} for i in range(10): - agents['master_%s' % i] = { - 'id': 'master_%s' % i, 'mode': 'master', 'node': 'uno', + agents['primary_%s' % i] = { + 'id': 'primary_%s' % i, 'mode': 'primary', 'node': 'uno', } - agents['slave_%s' % i] = { - 'id': 'slave_%s' % i, 'mode': 'slave', 'node': 'uno', + agents['minion_%s' % i] = { + 'id': 'minion_%s' % i, 'mode': 'minion', 'node': 'uno', } picked = [set(a['id'] for a in arr) for arr in server._pick_agents(agents, None)] - self.assertEqual([set('master_%s' % i for i in range(10))], + self.assertEqual([set('primary_%s' % i for i in range(10))], picked) def test_pick_agents_linear(self):
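
As a final end-to-end illustration of the renamed keys (again a reviewer aid, not part of the patch): once server._extend_agents() attaches the paired record under 'minion', executors read the target address from agent['minion']['ip']. The sketch below mirrors NetperfExecutor.get_command() from this diff; the agent values are illustrative, and the fixed 60-second duration stands in for the real get_expected_duration() call.

    # Mirror of NetperfExecutor.get_command() with the new key names.
    # Prefer an explicit 'host' from the test definition, otherwise fall
    # back to the paired minion's IP, exactly as the patched executor does.
    def build_netperf_command(agent, test_definition, duration=60):
        host = test_definition.get('host') or agent['minion']['ip']
        method = test_definition.get('method') or 'TCP_STREAM'
        return ['netperf', '-H', host, '-l', str(duration), '-t', method]

    # Hypothetical extended primary agent, shaped like _extend_agents() output.
    agent = {'id': 'UU1D_primary_0', 'mode': 'primary',
             'minion': {'id': 'UU1D_minion_0', 'ip': '10.0.0.10'}}
    print(' '.join(build_netperf_command(agent, {})))
    # netperf -H 10.0.0.10 -l 60 -t TCP_STREAM
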