Initial Load of Nova FusionCompute Project
Initial work to enable the Nova FusionCompute project in git. The work done here provides: - .gitignore - Indicate which files not to track within Git. - .gitreview - Input to the git-review command on how to send to Gerrit. - .testr.conf - Conf file input for the testr command (UT) - CONTRIBUTING.rst - Information on how to contribute. - HACKING.rst - Information on what needs to be done for updates. - LICENSE - The license for the project - README.rst - Information on what this project is. Currently this is the blueprint. - openstack-common.conf - Required openstack configuration for all projects - setup.cfg - Input to the setup.py on how to execute certain actions. - setup.py - Used for build of the project. - requirements.txt - Required packages (and levels) to run the code. - test-requirements.txt - Required packages (and levels) in addition to the requirements, that indicates what is needed to run the UT. - tox.ini - The input for the tox commands. In addition, a base set of packages for the agent and unit tests were loaded in. Change-Id: Id76684afa9c8617b40e8b175785f94ce7fb9a1d6
This commit is contained in:
parent
9be8b6d8aa
commit
5421274a23
|
@ -0,0 +1,6 @@
|
|||
[run]
|
||||
branch = True
|
||||
source = nova-fusioncompute
|
||||
|
||||
[report]
|
||||
ignore_errors = True
|
|
@ -0,0 +1,58 @@
|
|||
*.py[cod]
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Packages
|
||||
*.egg*
|
||||
*.egg-info
|
||||
dist
|
||||
build
|
||||
eggs
|
||||
parts
|
||||
bin
|
||||
var
|
||||
sdist
|
||||
develop-eggs
|
||||
.installed.cfg
|
||||
lib
|
||||
lib64
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
cover/
|
||||
.coverage*
|
||||
!.coveragerc
|
||||
.tox
|
||||
nosetests.xml
|
||||
.testrepository
|
||||
.venv
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
|
||||
# Mr Developer
|
||||
.mr.developer.cfg
|
||||
.project
|
||||
.pydevproject
|
||||
|
||||
# Complexity
|
||||
output/*.html
|
||||
output/*/index.html
|
||||
|
||||
# Sphinx
|
||||
doc/build
|
||||
|
||||
# pbr generates these
|
||||
AUTHORS
|
||||
ChangeLog
|
||||
|
||||
# Editors
|
||||
*~
|
||||
.*.swp
|
||||
.*sw?
|
||||
|
||||
# Files created by releasenotes build
|
||||
releasenotes/build
|
|
@ -0,0 +1,444 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="DBNavigator.Project.DataEditorManager">
|
||||
<record-view-column-sorting-type value="BY_INDEX" />
|
||||
<value-preview-text-wrapping value="true" />
|
||||
<value-preview-pinned value="false" />
|
||||
</component>
|
||||
<component name="DBNavigator.Project.DataExportManager">
|
||||
<export-instructions>
|
||||
<create-header value="true" />
|
||||
<quote-values-containing-separator value="true" />
|
||||
<quote-all-values value="false" />
|
||||
<value-separator value="" />
|
||||
<file-name value="" />
|
||||
<file-location value="" />
|
||||
<scope value="GLOBAL" />
|
||||
<destination value="FILE" />
|
||||
<format value="EXCEL" />
|
||||
<charset value="GBK" />
|
||||
</export-instructions>
|
||||
</component>
|
||||
<component name="DBNavigator.Project.DatabaseBrowserManager">
|
||||
<autoscroll-to-editor value="false" />
|
||||
<autoscroll-from-editor value="true" />
|
||||
<show-object-properties value="true" />
|
||||
<loaded-nodes />
|
||||
</component>
|
||||
<component name="DBNavigator.Project.EditorStateManager">
|
||||
<last-used-providers />
|
||||
</component>
|
||||
<component name="DBNavigator.Project.MethodExecutionManager">
|
||||
<method-browser />
|
||||
<execution-history>
|
||||
<group-entries value="true" />
|
||||
<execution-inputs />
|
||||
</execution-history>
|
||||
<argument-values-cache />
|
||||
</component>
|
||||
<component name="DBNavigator.Project.ObjectDependencyManager">
|
||||
<last-used-dependency-type value="INCOMING" />
|
||||
</component>
|
||||
<component name="DBNavigator.Project.ObjectQuickFilterManager">
|
||||
<last-used-operator value="EQUAL" />
|
||||
<filters />
|
||||
</component>
|
||||
<component name="DBNavigator.Project.ScriptExecutionManager" clear-outputs="true">
|
||||
<recently-used-interfaces />
|
||||
</component>
|
||||
<component name="DBNavigator.Project.Settings">
|
||||
<connections />
|
||||
<browser-settings>
|
||||
<general>
|
||||
<display-mode value="TABBED" />
|
||||
<navigation-history-size value="100" />
|
||||
<show-object-details value="false" />
|
||||
</general>
|
||||
<filters>
|
||||
<object-type-filter>
|
||||
<object-type name="SCHEMA" enabled="true" />
|
||||
<object-type name="USER" enabled="true" />
|
||||
<object-type name="ROLE" enabled="true" />
|
||||
<object-type name="PRIVILEGE" enabled="true" />
|
||||
<object-type name="CHARSET" enabled="true" />
|
||||
<object-type name="TABLE" enabled="true" />
|
||||
<object-type name="VIEW" enabled="true" />
|
||||
<object-type name="MATERIALIZED_VIEW" enabled="true" />
|
||||
<object-type name="NESTED_TABLE" enabled="true" />
|
||||
<object-type name="COLUMN" enabled="true" />
|
||||
<object-type name="INDEX" enabled="true" />
|
||||
<object-type name="CONSTRAINT" enabled="true" />
|
||||
<object-type name="DATASET_TRIGGER" enabled="true" />
|
||||
<object-type name="DATABASE_TRIGGER" enabled="true" />
|
||||
<object-type name="SYNONYM" enabled="true" />
|
||||
<object-type name="SEQUENCE" enabled="true" />
|
||||
<object-type name="PROCEDURE" enabled="true" />
|
||||
<object-type name="FUNCTION" enabled="true" />
|
||||
<object-type name="PACKAGE" enabled="true" />
|
||||
<object-type name="TYPE" enabled="true" />
|
||||
<object-type name="TYPE_ATTRIBUTE" enabled="true" />
|
||||
<object-type name="ARGUMENT" enabled="true" />
|
||||
<object-type name="DIMENSION" enabled="true" />
|
||||
<object-type name="CLUSTER" enabled="true" />
|
||||
<object-type name="DBLINK" enabled="true" />
|
||||
</object-type-filter>
|
||||
</filters>
|
||||
<sorting>
|
||||
<object-type name="COLUMN" sorting-type="NAME" />
|
||||
<object-type name="FUNCTION" sorting-type="NAME" />
|
||||
<object-type name="PROCEDURE" sorting-type="NAME" />
|
||||
<object-type name="ARGUMENT" sorting-type="POSITION" />
|
||||
</sorting>
|
||||
<default-editors>
|
||||
<object-type name="VIEW" editor-type="SELECTION" />
|
||||
<object-type name="PACKAGE" editor-type="SELECTION" />
|
||||
<object-type name="TYPE" editor-type="SELECTION" />
|
||||
</default-editors>
|
||||
</browser-settings>
|
||||
<navigation-settings>
|
||||
<lookup-filters>
|
||||
<lookup-objects>
|
||||
<object-type name="SCHEMA" enabled="true" />
|
||||
<object-type name="USER" enabled="false" />
|
||||
<object-type name="ROLE" enabled="false" />
|
||||
<object-type name="PRIVILEGE" enabled="false" />
|
||||
<object-type name="CHARSET" enabled="false" />
|
||||
<object-type name="TABLE" enabled="true" />
|
||||
<object-type name="VIEW" enabled="true" />
|
||||
<object-type name="MATERIALIZED VIEW" enabled="true" />
|
||||
<object-type name="NESTED TABLE" enabled="false" />
|
||||
<object-type name="COLUMN" enabled="false" />
|
||||
<object-type name="INDEX" enabled="true" />
|
||||
<object-type name="CONSTRAINT" enabled="true" />
|
||||
<object-type name="DATASET TRIGGER" enabled="true" />
|
||||
<object-type name="DATABASE TRIGGER" enabled="true" />
|
||||
<object-type name="SYNONYM" enabled="false" />
|
||||
<object-type name="SEQUENCE" enabled="true" />
|
||||
<object-type name="PROCEDURE" enabled="true" />
|
||||
<object-type name="FUNCTION" enabled="true" />
|
||||
<object-type name="PACKAGE" enabled="true" />
|
||||
<object-type name="TYPE" enabled="true" />
|
||||
<object-type name="TYPE ATTRIBUTE" enabled="false" />
|
||||
<object-type name="ARGUMENT" enabled="false" />
|
||||
<object-type name="DIMENSION" enabled="false" />
|
||||
<object-type name="CLUSTER" enabled="false" />
|
||||
<object-type name="DBLINK" enabled="true" />
|
||||
</lookup-objects>
|
||||
<force-database-load value="false" />
|
||||
<prompt-connection-selection value="true" />
|
||||
<prompt-schema-selection value="true" />
|
||||
</lookup-filters>
|
||||
</navigation-settings>
|
||||
<dataset-grid-settings>
|
||||
<general>
|
||||
<enable-zooming value="true" />
|
||||
</general>
|
||||
<sorting>
|
||||
<nulls-first value="true" />
|
||||
<max-sorting-columns value="4" />
|
||||
</sorting>
|
||||
<tracking-columns>
|
||||
<columnNames value="" />
|
||||
<visible value="true" />
|
||||
<editable value="false" />
|
||||
</tracking-columns>
|
||||
</dataset-grid-settings>
|
||||
<dataset-editor-settings>
|
||||
<text-editor-popup>
|
||||
<active value="false" />
|
||||
<active-if-empty value="false" />
|
||||
<data-length-threshold value="100" />
|
||||
<popup-delay value="1000" />
|
||||
</text-editor-popup>
|
||||
<values-list-popup>
|
||||
<show-popup-button value="true" />
|
||||
<element-count-threshold value="1000" />
|
||||
<data-length-threshold value="250" />
|
||||
</values-list-popup>
|
||||
<general>
|
||||
<fetch-block-size value="100" />
|
||||
<fetch-timeout value="30" />
|
||||
<trim-whitespaces value="true" />
|
||||
<convert-empty-strings-to-null value="true" />
|
||||
<select-content-on-cell-edit value="true" />
|
||||
<large-value-preview-active value="true" />
|
||||
</general>
|
||||
<filters>
|
||||
<prompt-filter-dialog value="true" />
|
||||
<default-filter-type value="BASIC" />
|
||||
</filters>
|
||||
<qualified-text-editor text-length-threshold="300">
|
||||
<content-types>
|
||||
<content-type name="Text" enabled="true" />
|
||||
<content-type name="XML" enabled="true" />
|
||||
<content-type name="DTD" enabled="true" />
|
||||
<content-type name="HTML" enabled="true" />
|
||||
<content-type name="XHTML" enabled="true" />
|
||||
<content-type name="SQL" enabled="true" />
|
||||
<content-type name="PL/SQL" enabled="true" />
|
||||
</content-types>
|
||||
</qualified-text-editor>
|
||||
<record-navigation>
|
||||
<navigation-target value="VIEWER" />
|
||||
</record-navigation>
|
||||
</dataset-editor-settings>
|
||||
<code-editor-settings>
|
||||
<general>
|
||||
<show-object-navigation-gutter value="false" />
|
||||
<show-spec-declaration-navigation-gutter value="true" />
|
||||
</general>
|
||||
<confirmations>
|
||||
<save-changes value="false" />
|
||||
<revert-changes value="true" />
|
||||
</confirmations>
|
||||
</code-editor-settings>
|
||||
<code-completion-settings>
|
||||
<filters>
|
||||
<basic-filter>
|
||||
<filter-element type="RESERVED_WORD" id="keyword" selected="true" />
|
||||
<filter-element type="RESERVED_WORD" id="function" selected="true" />
|
||||
<filter-element type="RESERVED_WORD" id="parameter" selected="true" />
|
||||
<filter-element type="RESERVED_WORD" id="datatype" selected="true" />
|
||||
<filter-element type="RESERVED_WORD" id="exception" selected="true" />
|
||||
<filter-element type="OBJECT" id="schema" selected="true" />
|
||||
<filter-element type="OBJECT" id="role" selected="true" />
|
||||
<filter-element type="OBJECT" id="user" selected="true" />
|
||||
<filter-element type="OBJECT" id="privilege" selected="true" />
|
||||
<user-schema>
|
||||
<filter-element type="OBJECT" id="table" selected="true" />
|
||||
<filter-element type="OBJECT" id="view" selected="true" />
|
||||
<filter-element type="OBJECT" id="materialized view" selected="true" />
|
||||
<filter-element type="OBJECT" id="index" selected="true" />
|
||||
<filter-element type="OBJECT" id="constraint" selected="true" />
|
||||
<filter-element type="OBJECT" id="trigger" selected="true" />
|
||||
<filter-element type="OBJECT" id="synonym" selected="false" />
|
||||
<filter-element type="OBJECT" id="sequence" selected="true" />
|
||||
<filter-element type="OBJECT" id="procedure" selected="true" />
|
||||
<filter-element type="OBJECT" id="function" selected="true" />
|
||||
<filter-element type="OBJECT" id="package" selected="true" />
|
||||
<filter-element type="OBJECT" id="type" selected="true" />
|
||||
<filter-element type="OBJECT" id="dimension" selected="true" />
|
||||
<filter-element type="OBJECT" id="cluster" selected="true" />
|
||||
<filter-element type="OBJECT" id="dblink" selected="true" />
|
||||
</user-schema>
|
||||
<public-schema>
|
||||
<filter-element type="OBJECT" id="table" selected="false" />
|
||||
<filter-element type="OBJECT" id="view" selected="false" />
|
||||
<filter-element type="OBJECT" id="materialized view" selected="false" />
|
||||
<filter-element type="OBJECT" id="index" selected="false" />
|
||||
<filter-element type="OBJECT" id="constraint" selected="false" />
|
||||
<filter-element type="OBJECT" id="trigger" selected="false" />
|
||||
<filter-element type="OBJECT" id="synonym" selected="false" />
|
||||
<filter-element type="OBJECT" id="sequence" selected="false" />
|
||||
<filter-element type="OBJECT" id="procedure" selected="false" />
|
||||
<filter-element type="OBJECT" id="function" selected="false" />
|
||||
<filter-element type="OBJECT" id="package" selected="false" />
|
||||
<filter-element type="OBJECT" id="type" selected="false" />
|
||||
<filter-element type="OBJECT" id="dimension" selected="false" />
|
||||
<filter-element type="OBJECT" id="cluster" selected="false" />
|
||||
<filter-element type="OBJECT" id="dblink" selected="false" />
|
||||
</public-schema>
|
||||
<any-schema>
|
||||
<filter-element type="OBJECT" id="table" selected="true" />
|
||||
<filter-element type="OBJECT" id="view" selected="true" />
|
||||
<filter-element type="OBJECT" id="materialized view" selected="true" />
|
||||
<filter-element type="OBJECT" id="index" selected="true" />
|
||||
<filter-element type="OBJECT" id="constraint" selected="true" />
|
||||
<filter-element type="OBJECT" id="trigger" selected="true" />
|
||||
<filter-element type="OBJECT" id="synonym" selected="true" />
|
||||
<filter-element type="OBJECT" id="sequence" selected="true" />
|
||||
<filter-element type="OBJECT" id="procedure" selected="true" />
|
||||
<filter-element type="OBJECT" id="function" selected="true" />
|
||||
<filter-element type="OBJECT" id="package" selected="true" />
|
||||
<filter-element type="OBJECT" id="type" selected="true" />
|
||||
<filter-element type="OBJECT" id="dimension" selected="true" />
|
||||
<filter-element type="OBJECT" id="cluster" selected="true" />
|
||||
<filter-element type="OBJECT" id="dblink" selected="true" />
|
||||
</any-schema>
|
||||
</basic-filter>
|
||||
<extended-filter>
|
||||
<filter-element type="RESERVED_WORD" id="keyword" selected="true" />
|
||||
<filter-element type="RESERVED_WORD" id="function" selected="true" />
|
||||
<filter-element type="RESERVED_WORD" id="parameter" selected="true" />
|
||||
<filter-element type="RESERVED_WORD" id="datatype" selected="true" />
|
||||
<filter-element type="RESERVED_WORD" id="exception" selected="true" />
|
||||
<filter-element type="OBJECT" id="schema" selected="true" />
|
||||
<filter-element type="OBJECT" id="user" selected="true" />
|
||||
<filter-element type="OBJECT" id="role" selected="true" />
|
||||
<filter-element type="OBJECT" id="privilege" selected="true" />
|
||||
<user-schema>
|
||||
<filter-element type="OBJECT" id="table" selected="true" />
|
||||
<filter-element type="OBJECT" id="view" selected="true" />
|
||||
<filter-element type="OBJECT" id="materialized view" selected="true" />
|
||||
<filter-element type="OBJECT" id="index" selected="true" />
|
||||
<filter-element type="OBJECT" id="constraint" selected="true" />
|
||||
<filter-element type="OBJECT" id="trigger" selected="true" />
|
||||
<filter-element type="OBJECT" id="synonym" selected="true" />
|
||||
<filter-element type="OBJECT" id="sequence" selected="true" />
|
||||
<filter-element type="OBJECT" id="procedure" selected="true" />
|
||||
<filter-element type="OBJECT" id="function" selected="true" />
|
||||
<filter-element type="OBJECT" id="package" selected="true" />
|
||||
<filter-element type="OBJECT" id="type" selected="true" />
|
||||
<filter-element type="OBJECT" id="dimension" selected="true" />
|
||||
<filter-element type="OBJECT" id="cluster" selected="true" />
|
||||
<filter-element type="OBJECT" id="dblink" selected="true" />
|
||||
</user-schema>
|
||||
<public-schema>
|
||||
<filter-element type="OBJECT" id="table" selected="true" />
|
||||
<filter-element type="OBJECT" id="view" selected="true" />
|
||||
<filter-element type="OBJECT" id="materialized view" selected="true" />
|
||||
<filter-element type="OBJECT" id="index" selected="true" />
|
||||
<filter-element type="OBJECT" id="constraint" selected="true" />
|
||||
<filter-element type="OBJECT" id="trigger" selected="true" />
|
||||
<filter-element type="OBJECT" id="synonym" selected="true" />
|
||||
<filter-element type="OBJECT" id="sequence" selected="true" />
|
||||
<filter-element type="OBJECT" id="procedure" selected="true" />
|
||||
<filter-element type="OBJECT" id="function" selected="true" />
|
||||
<filter-element type="OBJECT" id="package" selected="true" />
|
||||
<filter-element type="OBJECT" id="type" selected="true" />
|
||||
<filter-element type="OBJECT" id="dimension" selected="true" />
|
||||
<filter-element type="OBJECT" id="cluster" selected="true" />
|
||||
<filter-element type="OBJECT" id="dblink" selected="true" />
|
||||
</public-schema>
|
||||
<any-schema>
|
||||
<filter-element type="OBJECT" id="table" selected="true" />
|
||||
<filter-element type="OBJECT" id="view" selected="true" />
|
||||
<filter-element type="OBJECT" id="materialized view" selected="true" />
|
||||
<filter-element type="OBJECT" id="index" selected="true" />
|
||||
<filter-element type="OBJECT" id="constraint" selected="true" />
|
||||
<filter-element type="OBJECT" id="trigger" selected="true" />
|
||||
<filter-element type="OBJECT" id="synonym" selected="true" />
|
||||
<filter-element type="OBJECT" id="sequence" selected="true" />
|
||||
<filter-element type="OBJECT" id="procedure" selected="true" />
|
||||
<filter-element type="OBJECT" id="function" selected="true" />
|
||||
<filter-element type="OBJECT" id="package" selected="true" />
|
||||
<filter-element type="OBJECT" id="type" selected="true" />
|
||||
<filter-element type="OBJECT" id="dimension" selected="true" />
|
||||
<filter-element type="OBJECT" id="cluster" selected="true" />
|
||||
<filter-element type="OBJECT" id="dblink" selected="true" />
|
||||
</any-schema>
|
||||
</extended-filter>
|
||||
</filters>
|
||||
<sorting enabled="true">
|
||||
<sorting-element type="RESERVED_WORD" id="keyword" />
|
||||
<sorting-element type="RESERVED_WORD" id="datatype" />
|
||||
<sorting-element type="OBJECT" id="column" />
|
||||
<sorting-element type="OBJECT" id="table" />
|
||||
<sorting-element type="OBJECT" id="view" />
|
||||
<sorting-element type="OBJECT" id="materialized view" />
|
||||
<sorting-element type="OBJECT" id="index" />
|
||||
<sorting-element type="OBJECT" id="constraint" />
|
||||
<sorting-element type="OBJECT" id="trigger" />
|
||||
<sorting-element type="OBJECT" id="synonym" />
|
||||
<sorting-element type="OBJECT" id="sequence" />
|
||||
<sorting-element type="OBJECT" id="procedure" />
|
||||
<sorting-element type="OBJECT" id="function" />
|
||||
<sorting-element type="OBJECT" id="package" />
|
||||
<sorting-element type="OBJECT" id="type" />
|
||||
<sorting-element type="OBJECT" id="dimension" />
|
||||
<sorting-element type="OBJECT" id="cluster" />
|
||||
<sorting-element type="OBJECT" id="dblink" />
|
||||
<sorting-element type="OBJECT" id="schema" />
|
||||
<sorting-element type="OBJECT" id="role" />
|
||||
<sorting-element type="OBJECT" id="user" />
|
||||
<sorting-element type="RESERVED_WORD" id="function" />
|
||||
<sorting-element type="RESERVED_WORD" id="parameter" />
|
||||
</sorting>
|
||||
<format>
|
||||
<enforce-code-style-case value="true" />
|
||||
</format>
|
||||
</code-completion-settings>
|
||||
<execution-engine-settings>
|
||||
<statement-execution>
|
||||
<fetch-block-size value="100" />
|
||||
<execution-timeout value="20" />
|
||||
<debug-execution-timeout value="600" />
|
||||
<focus-result value="false" />
|
||||
<prompt-execution value="false" />
|
||||
</statement-execution>
|
||||
<script-execution>
|
||||
<command-line-interfaces />
|
||||
<execution-timeout value="300" />
|
||||
</script-execution>
|
||||
<method-execution>
|
||||
<execution-timeout value="30" />
|
||||
<debug-execution-timeout value="600" />
|
||||
<parameter-history-size value="10" />
|
||||
</method-execution>
|
||||
</execution-engine-settings>
|
||||
<operation-settings>
|
||||
<transactions>
|
||||
<uncommitted-changes>
|
||||
<on-project-close value="ASK" />
|
||||
<on-disconnect value="ASK" />
|
||||
<on-autocommit-toggle value="ASK" />
|
||||
</uncommitted-changes>
|
||||
<multiple-uncommitted-changes>
|
||||
<on-commit value="ASK" />
|
||||
<on-rollback value="ASK" />
|
||||
</multiple-uncommitted-changes>
|
||||
</transactions>
|
||||
<session-browser>
|
||||
<disconnect-session value="ASK" />
|
||||
<kill-session value="ASK" />
|
||||
<reload-on-filter-change value="false" />
|
||||
</session-browser>
|
||||
<compiler>
|
||||
<compile-type value="KEEP" />
|
||||
<compile-dependencies value="ASK" />
|
||||
<always-show-controls value="false" />
|
||||
</compiler>
|
||||
<debugger>
|
||||
<debugger-type value="JDBC" />
|
||||
<use-generic-runners value="true" />
|
||||
</debugger>
|
||||
</operation-settings>
|
||||
<ddl-file-settings>
|
||||
<extensions>
|
||||
<mapping file-type-id="VIEW" extensions="vw" />
|
||||
<mapping file-type-id="TRIGGER" extensions="trg" />
|
||||
<mapping file-type-id="PROCEDURE" extensions="prc" />
|
||||
<mapping file-type-id="FUNCTION" extensions="fnc" />
|
||||
<mapping file-type-id="PACKAGE" extensions="pkg" />
|
||||
<mapping file-type-id="PACKAGE_SPEC" extensions="pks" />
|
||||
<mapping file-type-id="PACKAGE_BODY" extensions="pkb" />
|
||||
<mapping file-type-id="TYPE" extensions="tpe" />
|
||||
<mapping file-type-id="TYPE_SPEC" extensions="tps" />
|
||||
<mapping file-type-id="TYPE_BODY" extensions="tpb" />
|
||||
</extensions>
|
||||
<general>
|
||||
<lookup-ddl-files value="true" />
|
||||
<create-ddl-files value="false" />
|
||||
<synchronize-ddl-files value="true" />
|
||||
<use-qualified-names value="false" />
|
||||
<make-scripts-rerunnable value="true" />
|
||||
</general>
|
||||
</ddl-file-settings>
|
||||
<general-settings>
|
||||
<regional-settings>
|
||||
<date-format value="MEDIUM" />
|
||||
<number-format value="UNGROUPED" />
|
||||
<locale value="SYSTEM_DEFAULT" />
|
||||
<use-custom-formats value="false" />
|
||||
</regional-settings>
|
||||
<environment>
|
||||
<environment-types>
|
||||
<environment-type id="development" name="Development" description="Development environment" color="-2430209/-12296320" readonly-code="false" readonly-data="false" />
|
||||
<environment-type id="integration" name="Integration" description="Integration environment" color="-2621494/-12163514" readonly-code="true" readonly-data="false" />
|
||||
<environment-type id="production" name="Production" description="Productive environment" color="-11574/-10271420" readonly-code="true" readonly-data="true" />
|
||||
<environment-type id="other" name="Other" description="" color="-1576/-10724543" readonly-code="false" readonly-data="false" />
|
||||
</environment-types>
|
||||
<visibility-settings>
|
||||
<connection-tabs value="true" />
|
||||
<dialog-headers value="true" />
|
||||
<object-editor-tabs value="true" />
|
||||
<script-editor-tabs value="false" />
|
||||
<execution-result-tabs value="true" />
|
||||
</visibility-settings>
|
||||
</environment>
|
||||
</general-settings>
|
||||
</component>
|
||||
<component name="DBNavigator.Project.StatementExecutionManager">
|
||||
<execution-variables />
|
||||
</component>
|
||||
</project>
|
|
@ -0,0 +1,48 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectDictionaryState">
|
||||
<dictionary name="z00345351" />
|
||||
</component>
|
||||
<component name="ProjectInspectionProfilesVisibleTreeState">
|
||||
<entry key="Project Default">
|
||||
<profile-state>
|
||||
<expanded-state>
|
||||
<State>
|
||||
<id />
|
||||
</State>
|
||||
</expanded-state>
|
||||
<selected-state>
|
||||
<State>
|
||||
<id>Buildout</id>
|
||||
</State>
|
||||
</selected-state>
|
||||
</profile-state>
|
||||
</entry>
|
||||
</component>
|
||||
<component name="ProjectLevelVcsManager" settingsEditedManually="false">
|
||||
<OptionsSetting value="true" id="Add" />
|
||||
<OptionsSetting value="true" id="Remove" />
|
||||
<OptionsSetting value="true" id="Checkout" />
|
||||
<OptionsSetting value="true" id="Update" />
|
||||
<OptionsSetting value="true" id="Status" />
|
||||
<OptionsSetting value="true" id="Edit" />
|
||||
<ConfirmationsSetting value="0" id="Add" />
|
||||
<ConfirmationsSetting value="0" id="Remove" />
|
||||
</component>
|
||||
<component name="ProjectRootManager" version="2" project-jdk-name="Python 2.7.10 (C:\Python27\python.exe)" project-jdk-type="Python SDK" />
|
||||
<component name="masterDetails">
|
||||
<states>
|
||||
<state key="ScopeChooserConfigurable.UI">
|
||||
<settings>
|
||||
<splitter-proportions>
|
||||
<option name="proportions">
|
||||
<list>
|
||||
<option value="0.2" />
|
||||
</list>
|
||||
</option>
|
||||
</splitter-proportions>
|
||||
</settings>
|
||||
</state>
|
||||
</states>
|
||||
</component>
|
||||
</project>
|
|
@ -0,0 +1,8 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectModuleManager">
|
||||
<modules>
|
||||
<module fileurl="file://$PROJECT_DIR$/.idea/nova-fusioncompute.iml" filepath="$PROJECT_DIR$/.idea/nova-fusioncompute.iml" />
|
||||
</modules>
|
||||
</component>
|
||||
</project>
|
|
@ -0,0 +1,8 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<module type="PYTHON_MODULE" version="4">
|
||||
<component name="NewModuleRootManager">
|
||||
<content url="file://$MODULE_DIR$" />
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
</module>
|
|
@ -0,0 +1,47 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ChangeListManager">
|
||||
<option name="TRACKING_ENABLED" value="true" />
|
||||
<option name="SHOW_DIALOG" value="false" />
|
||||
<option name="HIGHLIGHT_CONFLICTS" value="true" />
|
||||
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
|
||||
<option name="LAST_RESOLUTION" value="IGNORE" />
|
||||
</component>
|
||||
<component name="ChangesViewManager" flattened_view="true" show_ignored="false" />
|
||||
<component name="CreatePatchCommitExecutor">
|
||||
<option name="PATCH_PATH" value="" />
|
||||
</component>
|
||||
<component name="ProjectLevelVcsManager" settingsEditedManually="false">
|
||||
<OptionsSetting value="true" id="Add" />
|
||||
<OptionsSetting value="true" id="Remove" />
|
||||
<OptionsSetting value="true" id="Checkout" />
|
||||
<OptionsSetting value="true" id="Update" />
|
||||
<OptionsSetting value="true" id="Status" />
|
||||
<OptionsSetting value="true" id="Edit" />
|
||||
<ConfirmationsSetting value="0" id="Add" />
|
||||
<ConfirmationsSetting value="0" id="Remove" />
|
||||
</component>
|
||||
<component name="PropertiesComponent">
|
||||
<property name="settings.editor.selected.configurable" value="project.propVCSSupport.Mappings" />
|
||||
<property name="settings.editor.splitter.proportion" value="0.2" />
|
||||
</component>
|
||||
<component name="ShelveChangesManager" show_recycled="false">
|
||||
<option name="remove_strategy" value="false" />
|
||||
</component>
|
||||
<component name="TaskManager">
|
||||
<task active="true" id="Default" summary="Default task">
|
||||
<created>1477020328801</created>
|
||||
<option name="number" value="Default" />
|
||||
<option name="presentableId" value="Default" />
|
||||
<updated>1477020328801</updated>
|
||||
</task>
|
||||
<servers />
|
||||
</component>
|
||||
<component name="VcsContentAnnotationSettings">
|
||||
<option name="myLimit" value="2678400000" />
|
||||
</component>
|
||||
<component name="XDebuggerManager">
|
||||
<breakpoint-manager />
|
||||
<watches-manager />
|
||||
</component>
|
||||
</project>
|
|
@ -0,0 +1,3 @@
|
|||
# Format is:
|
||||
# <preferred e-mail> <other e-mail 1>
|
||||
# <preferred e-mail> <other e-mail 2>
|
|
@ -0,0 +1,7 @@
|
|||
[DEFAULT]
|
||||
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
|
||||
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
|
||||
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
|
||||
${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
|
||||
test_id_option=--load-list $IDFILE
|
||||
test_list_option=--list
|
|
@ -0,0 +1,17 @@
|
|||
If you would like to contribute to the development of OpenStack, you must
|
||||
follow the steps in this page:
|
||||
|
||||
http://docs.openstack.org/infra/manual/developers.html
|
||||
|
||||
If you already have a good understanding of how the system works and your
|
||||
OpenStack accounts are set up, you can skip to the development workflow
|
||||
section of this documentation to learn how changes to OpenStack should be
|
||||
submitted for review via the Gerrit tool:
|
||||
|
||||
http://docs.openstack.org/infra/manual/developers.html#development-workflow
|
||||
|
||||
Pull requests submitted through GitHub will be ignored.
|
||||
|
||||
Bugs should be filed on Launchpad, not GitHub:
|
||||
|
||||
https://bugs.launchpad.net/nova-fusioncompute
|
|
@ -0,0 +1,4 @@
|
|||
nova-fusioncompute Style Commandments
|
||||
===============================================
|
||||
|
||||
Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/
|
|
@ -0,0 +1,176 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
include AUTHORS
|
||||
include ChangeLog
|
||||
exclude .gitignore
|
||||
exclude .gitreview
|
||||
|
||||
global-exclude *.pyc
|
|
@ -0,0 +1,19 @@
|
|||
===============================
|
||||
nova-fusioncompute
|
||||
===============================
|
||||
|
||||
nova-fusioncompute is Huawei FusionCompute[1] virtualization driver for OpenStack Nova
|
||||
|
||||
nova-fusioncompute implements a Nova virtualization driver that manages
virtual machines hosted on Huawei FusionCompute through its REST API,
including VM lifecycle operations and cluster resource reporting.
|
||||
|
||||
* Free software: Apache license
|
||||
* Documentation: http://docs.openstack.org/developer/nova-fusioncompute
|
||||
* Source: http://git.openstack.org/cgit/openstack/nova-fusioncompute
|
||||
* Bugs: http://bugs.launchpad.net/nova-fusioncompute
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
* TODO
|
|
@ -0,0 +1,75 @@
|
|||
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Sphinx configuration for building the nova-fusioncompute documentation.

import os
import sys

# Make the repository root importable so sphinx.ext.autodoc can find
# the package when docs are built from doc/source.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'oslosphinx'
]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'nova-fusioncompute'
copyright = u'2016, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
|
|
@ -0,0 +1,4 @@
|
|||
============
|
||||
Contributing
|
||||
============
|
||||
.. include:: ../../CONTRIBUTING.rst
|
|
@ -0,0 +1,25 @@
|
|||
.. nova-fusioncompute documentation master file, created by
|
||||
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to nova-fusioncompute's documentation!
|
||||
========================================================
|
||||
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
readme
|
||||
installation
|
||||
usage
|
||||
contributing
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
============
|
||||
Installation
|
||||
============
|
||||
|
||||
At the command line::
|
||||
|
||||
$ pip install nova-fusioncompute
|
||||
|
||||
Or, if you have virtualenvwrapper installed::
|
||||
|
||||
$ mkvirtualenv nova-fusioncompute
|
||||
$ pip install nova-fusioncompute
|
|
@ -0,0 +1 @@
|
|||
.. include:: ../../README.rst
|
|
@ -0,0 +1,7 @@
|
|||
========
|
||||
Usage
|
||||
========
|
||||
|
||||
To use nova-fusioncompute in a project::
|
||||
|
||||
    import nova_fusioncompute
|
|
@ -0,0 +1,16 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslotest import base
|
||||
|
||||
|
||||
class TestConf(base.BaseTestCase):
    """Placeholder test case so the py27 tox target has a passing test."""

    def setUp(self):
        super(TestConf, self).setUp()

    def test_null(self):
        """Initial test to pass py27."""

        outcome = "pass"
        self.assertEqual("pass", outcome)
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os

# Disable eventlet's green DNS resolver before eventlet gets imported
# anywhere in the test suite; the environment variable must be set
# prior to the first `import eventlet`.
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
|
|
@ -0,0 +1,20 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
from oslo_log import log as logging

# Module-level logger shared by the FusionCompute driver code.
LOG = logging.getLogger(__name__)
# Global oslo.config object; driver options are read from it
# (e.g. CONF.fusioncompute.clusters).
CONF = cfg.CONF
|
|
@ -0,0 +1,528 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import json
|
||||
import math
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.virt.fusioncomputeapi import constant
|
||||
from nova.virt.fusioncomputeapi import exception as fc_exc
|
||||
from nova.virt.fusioncomputeapi.fcinstance import FC_INSTANCE_MANAGER as FC_MGR
|
||||
from nova.virt.fusioncomputeapi import ops_task_base
|
||||
from nova.virt.fusioncomputeapi import utils
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
UTC_TIME_TO_SEC = 1000
|
||||
|
||||
|
||||
class ClusterOps(ops_task_base.OpsTaskBase):
|
||||
    """cluster system manager and driver resource info
|
||||
|
||||
"""
|
||||
|
||||
    def __init__(self, fc_client, task_ops):
        """Initialize cluster operations.

        :param fc_client: FusionCompute REST client used for API calls
        :param task_ops: task operations helper forwarded to the base class
        """
        super(ClusterOps, self).__init__(fc_client, task_ops)
        # Latest resource statistics cache.
        self._stats = {}
        # Cluster info dicts keyed by cluster name; refreshed by
        # init_all_cluster().
        self.clusters = {}
        # Nodenames ("<site>@<cluster>") rebuilt by update_resources().
        self.resources = []
|
||||
|
||||
def list_all_clusters(self):
|
||||
"""get all cluster info
|
||||
|
||||
:return:
|
||||
"""
|
||||
LOG.info('list_all_clusters self.site.cluster_uri:%s .' %
|
||||
self.site.cluster_uri)
|
||||
|
||||
cluster_list = self.get(self.site.cluster_uri)['clusters']
|
||||
LOG.debug('clusters:%s' % cluster_list)
|
||||
return cluster_list
|
||||
|
||||
def init_all_cluster(self):
|
||||
"""get all cluster info
|
||||
|
||||
:return:
|
||||
"""
|
||||
LOG.debug('self.site.cluster_uri:%s .' % self.site.cluster_uri)
|
||||
|
||||
cfg_cluster_list = utils.split_strip(
|
||||
constant.CONF.fusioncompute.clusters)
|
||||
cluster_list = self.get(self.site.cluster_uri)['clusters']
|
||||
LOG.debug(
|
||||
'clusters:%s, split:%s .' %
|
||||
(constant.CONF.fusioncompute.clusters,
|
||||
','.join(cfg_cluster_list)))
|
||||
|
||||
self.clusters = {}
|
||||
for cluster in cluster_list:
|
||||
if cluster['name'] in cfg_cluster_list:
|
||||
self.clusters[cluster['name']] = cluster
|
||||
|
||||
def get_cluster_detail_by_nodename(self, nodename):
|
||||
"""get cluster by node name"""
|
||||
cluster_urn = self.get_cluster_urn_by_nodename(nodename)
|
||||
return self.get(utils.generate_uri_from_urn(cluster_urn))
|
||||
|
||||
def get_local_cluster_urn_list(self):
|
||||
"""get local config cluster urn
|
||||
|
||||
:return:
|
||||
"""
|
||||
self.init_all_cluster()
|
||||
return [cluster['urn'] for cluster in self.clusters.values()]
|
||||
|
||||
def get_cluster_urn_by_nodename(self, nodename):
|
||||
"""get cluster urn by node name"""
|
||||
cluster_name = self.get_cluster_name_by_nodename(nodename)
|
||||
if cluster_name:
|
||||
self.init_all_cluster()
|
||||
if self.clusters.get(cluster_name):
|
||||
return self.clusters.get(cluster_name)['urn']
|
||||
return None
|
||||
|
||||
def get_cluster_urn_for_migrate(self, nodename):
|
||||
"""get cluster urn by node name"""
|
||||
cluster_name = self.get_cluster_name_by_nodename(nodename)
|
||||
if cluster_name:
|
||||
clusters = self.get(self.site.cluster_uri)['clusters']
|
||||
for cluster in clusters:
|
||||
if cluster_name == cluster['name']:
|
||||
return cluster['urn']
|
||||
return None
|
||||
|
||||
def update_resources(self):
|
||||
"""ini hypervisor info list
|
||||
|
||||
:return:
|
||||
"""
|
||||
self.resources = []
|
||||
self.init_all_cluster()
|
||||
for cluster_name in self.clusters:
|
||||
self.resources.append(self.create_nodename(cluster_name))
|
||||
|
||||
def get_cluster_name_by_nodename(self, nodename):
|
||||
"""get cluster name by node info"""
|
||||
if nodename:
|
||||
temps = nodename.split('@')
|
||||
if len(temps) != 2:
|
||||
return nodename
|
||||
else:
|
||||
return temps[1]
|
||||
else:
|
||||
return nodename
|
||||
|
||||
    def get_available_resource(self, nodename):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :param nodename: hypervisor nodename ("<site>@<cluster>")
        :returns: dictionary describing resources, or {} when the
                  cluster cannot be resolved
        """
        cluster_name = self.get_cluster_name_by_nodename(nodename)
        cluster_resource = self.get_cluster_resource(cluster_name)
        if not cluster_resource:
            LOG.error(_("Invalid cluster name : %s"), nodename)
            return {}

        # Nova expects cpu_info as a JSON string, not a dict.
        cluster_resource['cpu_info'] = \
            jsonutils.dumps(cluster_resource['cpu_info'])
        # cluster_resource['supported_instances'] = jsonutils.dumps(
        #     cluster_resource['supported_instances'])

        LOG.debug("the resource status is %s", cluster_resource)
        return cluster_resource
|
||||
|
||||
def _query_host_by_scope(self, scope):
|
||||
"""Query host info
|
||||
|
||||
:param scope : clusterUrn , dvswitchUrn or datasotroeUrn
|
||||
:return a list of host in scope
|
||||
"""
|
||||
host_uri = utils.build_uri_with_params(self.site.host_uri,
|
||||
{'scope': scope})
|
||||
return self.get(host_uri)['hosts']
|
||||
|
||||
def _get_cluster_computeresource(self, cluster):
|
||||
computeres_uri = cluster["uri"] + "/" + \
|
||||
"allvmcomputeresource?isNeedAllocVcpus=true&detail=true"
|
||||
return self.get(computeres_uri)
|
||||
|
||||
def get_resource_group(self, cluster_urn, instance_group):
|
||||
|
||||
resource_group_uri = utils.generate_uri_from_urn(
|
||||
cluster_urn) + '/resourcegroups'
|
||||
condition = {'type': 0, 'useType': 1, 'name': instance_group[
|
||||
'uuid'], 'limit': 100, 'offset': 0}
|
||||
resource_group_uri = utils.build_uri_with_params(
|
||||
resource_group_uri, condition)
|
||||
resource_groups = self.get(resource_group_uri).get('groups')
|
||||
if resource_groups:
|
||||
return resource_groups[0]
|
||||
else:
|
||||
return None
|
||||
|
||||
def get_resource_group_list(self, cluster_urn):
|
||||
|
||||
resource_group_uri = utils.generate_uri_from_urn(
|
||||
cluster_urn) + '/resourcegroups'
|
||||
|
||||
offset = 0
|
||||
limit = 100
|
||||
resourcegroups_all = []
|
||||
while True:
|
||||
condition = {
|
||||
'limit': limit,
|
||||
'offset': offset,
|
||||
'type': 0,
|
||||
'useType': 1
|
||||
}
|
||||
resource_group_uri = utils.build_uri_with_params(
|
||||
resource_group_uri, condition)
|
||||
response = self.get(resource_group_uri)
|
||||
|
||||
total = int(response.get('total') or 0)
|
||||
if total > 0:
|
||||
resourcegroups = response.get('groups')
|
||||
resourcegroups_all += resourcegroups
|
||||
offset += len(resourcegroups)
|
||||
if offset >= total or len(resourcegroups_all) >= total or len(
|
||||
resourcegroups) < limit:
|
||||
break
|
||||
else:
|
||||
break
|
||||
return resourcegroups_all
|
||||
|
||||
def delete_resource_group(self, resource_group_urn):
|
||||
|
||||
resource_group_uri = utils.generate_uri_from_urn(resource_group_urn)
|
||||
|
||||
self.delete(resource_group_uri)
|
||||
|
||||
def create_resource_group(self, cluster_urn, instance_group):
|
||||
|
||||
resource_group_uri = utils.generate_uri_from_urn(
|
||||
cluster_urn) + '/resourcegroups'
|
||||
body = {'type': 0, 'useType': 1, 'name': instance_group[
|
||||
'uuid'], 'policies': instance_group.get('policies')}
|
||||
|
||||
resource_group = self.post(resource_group_uri, data=body)
|
||||
|
||||
return resource_group['urn']
|
||||
|
||||
def get_fc_current_time(self):
|
||||
current_time = self.get(self.site.current_time_uri)
|
||||
if current_time:
|
||||
utc_time = current_time["currentUtcTime"]
|
||||
utc_time_num_value = int(utc_time) / UTC_TIME_TO_SEC
|
||||
return utc_time_num_value
|
||||
return None
|
||||
|
||||
def get_cpu_usage(self, monitor_period, cluster_urn):
|
||||
end_time = self.get_fc_current_time()
|
||||
start_time = end_time - (monitor_period * 2)
|
||||
|
||||
body = [
|
||||
{
|
||||
"startTime": str(start_time),
|
||||
"endTime": str(end_time),
|
||||
"interval": str(monitor_period),
|
||||
"metricId": "cpu_usage",
|
||||
"urn": cluster_urn
|
||||
}
|
||||
]
|
||||
|
||||
LOG.debug("get_cpu_usage body:%s", json.dumps(body))
|
||||
response = self.fc_client.post(self.site.metric_curvedata_uri,
|
||||
data=body)
|
||||
LOG.debug("get_cpu_usage body:%s response:%s",
|
||||
json.dumps(body), json.dumps(response))
|
||||
if response:
|
||||
if len(response["items"]) > 0:
|
||||
metric_value = response["items"][0]["metricValue"]
|
||||
if len(metric_value) > 0:
|
||||
value = metric_value[0]["value"]
|
||||
if len(metric_value) is 2:
|
||||
if metric_value[1]["value"] is not None:
|
||||
value = metric_value[1]["value"]
|
||||
return value
|
||||
return None
|
||||
|
||||
    def get_cluster_stats_by_name(self, cluster_name):
        """Get the aggregate resource stats of a cluster.

        :param cluster_name: FC cluster name (without the site prefix)
        :return: tuple (cluster_urn, data) where data is
                 {'cpu': cpu_info, 'mem': mem_info}; cluster_urn is None
                 and the dicts keep their zeroed defaults when the
                 cluster is not found
        """
        cpu_info = dict(vcpus=0, cores=0, pcpus=0, vendor=[], model=[])
        mem_info = dict(total=0, used=0)
        cluster_urn = None
        cluster_query_info = {'name': cluster_name}
        cluster_query_uri = utils.build_uri_with_params(self.site.cluster_uri,
                                                        cluster_query_info)
        clusters = self.get(cluster_query_uri)['clusters']
        find_cluster = None
        if clusters:
            # The API query is a filter, not an exact match; pick the
            # cluster whose name matches exactly.
            for cluster in clusters:
                if cluster['name'] == cluster_name:
                    find_cluster = cluster

        if find_cluster:
            cluster_urn = find_cluster['urn']
            # Aggregate CPU vendor/model/socket info over healthy,
            # non-maintenance hosts only.
            hosts = self._query_host_by_scope(cluster_urn)
            for host in hosts:
                if host['status'] == 'normal' and (not host['isMaintaining']):
                    if 'vendor' in host:
                        cpu_info['vendor'].append(host['vendor'])
                    if 'model' in host:
                        cpu_info['model'].append(host['model'])
                    if 'physicalCpuQuantity' in host:
                        cpu_info['pcpus'] += host['physicalCpuQuantity']

            computeresource = self._get_cluster_computeresource(find_cluster)
            cpuResource = computeresource["cpuResource"]
            memResource = computeresource["memResource"]

            # Per-VM-state breakdowns; may be absent in older FC versions.
            allocated_cpu_detail = computeresource.get('detailCpuResource')
            allocated_mem_detail = computeresource.get('detailMemResource')

            cpu_info["vcpus"] = cpuResource.get("totalVcpus", 0)
            cpu_info["allocatedVcpus"] = cpuResource.get("allocatedVcpus", 0)
            cpu_info["totalSizeMHz"] = cpuResource.get("totalSizeMHz")
            cpu_info["allocatedSizeMHz"] = cpuResource.get("allocatedSizeMHz")
            # vCPUs held by stopped VMs (0 when no detail is available).
            cpu_info["stopVmAllocatedVcpus"] = 0
            if allocated_cpu_detail is not None:
                cpu_info["stopVmAllocatedVcpus"] = allocated_cpu_detail.get(
                    'allocatedVcpus').get("Stopped")

            mem_info['total'] = memResource.get("totalSizeMB", 0)
            mem_info['used'] = memResource.get("allocatedSizeMB", 0)
            # Memory (MB) held by stopped VMs.
            mem_info["stopVmAllocatedMem"] = 0
            if allocated_mem_detail is not None:
                mem_info["stopVmAllocatedMem"] = allocated_mem_detail.get(
                    'allocatedSizeMB').get("Stopped")

            # The metric API only accepts these sampling periods;
            # fall back to one hour for anything else.
            cpu_usage_monitor_period = \
                constant.CONF.fusioncompute.cpu_usage_monitor_period
            if cpu_usage_monitor_period not in [300, 1800, 3600, 86400]:
                cpu_usage_monitor_period = 3600
            cpu_info["usage"] = self.get_cpu_usage(cpu_usage_monitor_period,
                                                   cluster_urn)

            data = {'cpu': cpu_info, 'mem': mem_info}
            return cluster_urn, data
        else:
            LOG.warn(_("get cluster status failed, use default."))
            data = {'cpu': cpu_info, 'mem': mem_info}
            return cluster_urn, data
|
||||
|
||||
def query_datastore_by_cluster_urn(self, cluster_urn):
|
||||
"""Query """
|
||||
datastore_cond = {'status': 'NORMAL', 'scope': cluster_urn}
|
||||
datastore_uri = utils.build_uri_with_params(self.site.datastore_uri,
|
||||
datastore_cond)
|
||||
return self.get(datastore_uri)['datastores']
|
||||
|
||||
def get_hypervisor_type(self):
|
||||
"""Returns the type of the hypervisor."""
|
||||
return constant.FC_DRIVER_JOINT_CFG['hypervisor_type']
|
||||
|
||||
def get_hypervisor_version(self):
|
||||
"""Get hypervisor version."""
|
||||
return constant.FC_DRIVER_JOINT_CFG['hypervisor_version']
|
||||
|
||||
def create_nodename(self, cluster_name):
|
||||
"""Creates the name that is stored in hypervisor_hostname column.
|
||||
|
||||
The name will be of the form similar to
|
||||
site001_GlodCluster008
|
||||
"""
|
||||
return '@'.join([self.site_id, cluster_name])
|
||||
|
||||
def get_instance_capabilities(self):
|
||||
"""get_instance_capabilities"""
|
||||
return [('i686', 'xen', 'xen'),
|
||||
('x86_64', 'xen', 'xen')]
|
||||
|
||||
def get_running_vms(self, cluster_urn):
    """Return the vm count reported by FC for this cluster.

    :param cluster_urn: FC cluster urn to scope the count to
    :return: total number of non-template VMs in the cluster group
    """
    query_kwargs = {
        'scope': cluster_urn,
        'isTemplate': False,
        'group': constant.VM_GROUP_FLAG,
    }
    return FC_MGR.get_total_vm_numbers(**query_kwargs)
|
||||
|
||||
def get_cluster_resource(self, cluster_name):
    """Get the current state of the cluster.

    Builds the resource dict nova's resource tracker consumes:
    vcpu/memory/disk totals and usage, cpu_info, hypervisor
    identification and the running vm count.

    :param cluster_name: FC cluster name
    :return: dict of resource figures for this cluster
    """
    res = {}
    cluster_urn, cluster_stats = \
        self.get_cluster_stats_by_name(cluster_name)

    disk_total = 0
    disk_available = 0

    # Sum capacity/free space over every NORMAL datastore in the cluster.
    datastores = self.query_datastore_by_cluster_urn(cluster_urn)
    for datastore in datastores:
        disk_total += datastore['actualCapacityGB']
        disk_available += datastore['actualFreeSizeGB']

    # Physical vcpus are scaled by the configured overcommit ratio.
    res["vcpus"] = int(int(cluster_stats['cpu']['vcpus'])
                       * constant.CONF.fusioncompute.cpu_ratio)
    res["memory_mb"] = cluster_stats['mem']['total']
    res["local_gb"] = disk_total
    res["numa_topology"] = None
    # Usage figures discount stopped VMs via resource_reduced_rate.
    res['vcpus_used'] = self._calculate_vcpu_mem_used(
        cluster_stats["cpu"]['stopVmAllocatedVcpus'],
        cluster_stats["cpu"]["allocatedVcpus"])
    res['memory_mb_used'] = self._calculate_vcpu_mem_used(
        cluster_stats["mem"]['stopVmAllocatedMem'],
        cluster_stats['mem']['used'])
    res['local_gb_used'] = disk_total - disk_available
    cpu_info = cluster_stats["cpu"]
    topology = {"cores": cpu_info['cores'],
                "threads": cpu_info['vcpus']}
    # extra_info values are stringified for serialization downstream.
    extra_cpu_info = {
        "totalSizeMHz": str(cpu_info["totalSizeMHz"]),
        "allocatedSizeMHz": str(cpu_info["allocatedSizeMHz"]),
        "usage": str(cpu_info["usage"])
    }

    res["cpu_info"] = {"vendor": cpu_info['vendor'],
                       "model": cpu_info['model'],
                       "topology": topology,
                       "extra_info": extra_cpu_info,
                       'pcpus': cpu_info['pcpus']}
    res["hypervisor_type"] = self.get_hypervisor_type()
    res["hypervisor_version"] = self.get_hypervisor_version()
    res["hypervisor_hostname"] = self.create_nodename(cluster_name)
    res["supported_instances"] = self.get_instance_capabilities()

    res['running_vms'] = self.get_running_vms(cluster_urn)

    return res
|
||||
|
||||
def _calculate_vcpu_mem_used(self, stopped_vm_allocated, all_vm_allocated):
    """Discount stopped-VM allocations by the configured reduced rate.

    Running VMs count in full; stopped VMs contribute only
    ``resource_reduced_rate`` percent of their allocation
    (rounded up).
    """
    reduced_rate = constant.CONF.fusioncompute.resource_reduced_rate
    if reduced_rate is None:
        # Unset option means no reduction (100%).
        reduced_rate = 100
    running_share = all_vm_allocated - stopped_vm_allocated
    stopped_share = math.ceil(
        stopped_vm_allocated * float(reduced_rate) / 100)
    return running_share + stopped_share
|
||||
|
||||
def _modify_cluster(self, cluster, changes):
    """Apply a partial update to an FC cluster.

    :param cluster: fc cluster dict (must contain 'uri')
    :param changes: modification body dict, sent to FC as-is
    """
    self.put(cluster['uri'], data=changes,
             excp=fc_exc.ModifyClusterFailure)
|
||||
|
||||
def _get_drs_rules_from_cluster(self, cluster, rule_name, rule_type):
|
||||
"""_get_drs_rules_from_cluster
|
||||
|
||||
:param cluster:
|
||||
:param rule_name:
|
||||
:param rule_type:
|
||||
:return:
|
||||
"""
|
||||
drs_rules = cluster['drsSetting']['drsRules']
|
||||
for drs_rule in drs_rules:
|
||||
if drs_rule['ruleName'] == rule_name \
|
||||
and drs_rule['ruleType'] == rule_type:
|
||||
return drs_rule
|
||||
return None
|
||||
|
||||
def create_drs_rules(self, cluster, rule_name, rule_type):
    """Create a DRS rule on the cluster unless it already exists.

    :param cluster: fc cluster dict
    :param rule_name: name of the rule to create
    :param rule_type: type of the rule to create
    """
    existing = self._get_drs_rules_from_cluster(
        cluster, rule_name, rule_type)
    if existing:
        # Idempotent: an identical rule means nothing to do.
        LOG.debug(_("drs rules %s already exists"), rule_name)
        return

    new_rule = {
        'operationType': constant.DRS_RULES_OP_TYPE_MAP['create'],
        'ruleName': rule_name,
        'ruleType': rule_type
    }
    self._modify_cluster(cluster,
                         {'drsSetting': {'drsRules': [new_rule]}})
    LOG.debug(_("create drs rules %s succeed"), rule_name)
|
||||
|
||||
def delete_drs_rules(self, cluster, rule_name, rule_type):
    """Delete a DRS rule from the cluster; no-op when absent.

    :param cluster: fc cluster dict
    :param rule_name: name of the rule to remove
    :param rule_type: type of the rule to remove
    """
    rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
    if rule is None:
        LOG.debug(_("drs rules %s not exists"), rule_name)
        return

    # FC deletes rules by index, not by name.
    removal = {
        'operationType': constant.DRS_RULES_OP_TYPE_MAP['delete'],
        'ruleIndex': rule['ruleIndex']
    }
    self._modify_cluster(cluster,
                         {'drsSetting': {'drsRules': [removal]}})
    LOG.debug(_("delete drs rules %s succeed"), rule_name)
|
||||
|
||||
def modify_drs_rules(self, cluster, rule_name, rule_type, vms):
    """Replace the vm list of an existing DRS rule.

    :param cluster: fc cluster dict
    :param rule_name: name of the rule to modify
    :param rule_type: type of the rule to modify
    :param vms: new vm list for the rule
    :raises fc_exc.AffinityGroupException: when the rule is missing
    """
    rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
    if rule is None:
        msg = (_("Can not find drs rules: name=%s,") % rule_name)
        raise fc_exc.AffinityGroupException(reason=msg)

    update = {
        'operationType': constant.DRS_RULES_OP_TYPE_MAP['modify'],
        'ruleIndex': rule['ruleIndex'],
        'ruleName': rule_name,
        'ruleType': rule_type,
        'vms': vms
    }
    self._modify_cluster(cluster,
                         {'drsSetting': {'drsRules': [update]}})
    LOG.debug(_("modify drs rules %s succeed"), rule_name)
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,599 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import re
|
||||
import types
|
||||
|
||||
from nova.compute import power_state
|
||||
from nova.i18n import _
|
||||
from nova.virt.fusioncomputeapi import osconfig
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
fusion_compute_opts = [
|
||||
cfg.StrOpt('fc_user',
|
||||
default='',
|
||||
help='FusionCompute user name'),
|
||||
cfg.StrOpt('fc_pwd',
|
||||
default='',
|
||||
help='FusionCompute user password',
|
||||
secret=True),
|
||||
cfg.StrOpt('fc_ip',
|
||||
default=None,
|
||||
help='Management IP of FusionCompute'),
|
||||
cfg.StrOpt('fc_image_path',
|
||||
default=None,
|
||||
help='NFS Image server path'),
|
||||
cfg.StrOpt('vxlan_dvs_name',
|
||||
default=None,
|
||||
help='FusionCompute dvswitch name for vxlan network'),
|
||||
cfg.StrOpt('enhanced_network_dvs_name',
|
||||
default=None,
|
||||
help='FusionCompute dvs_name for enhanced_network network'),
|
||||
cfg.BoolOpt('use_admin_pass',
|
||||
default=False,
|
||||
help='Create vm using the admin pass or fusionCompute pass'),
|
||||
cfg.StrOpt('clusters',
|
||||
default='',
|
||||
help='FusionCompute clusters mapped to hypervisors'),
|
||||
cfg.FloatOpt('cpu_ratio',
|
||||
default=1,
|
||||
help='FusionCompute cpu multiplexing ratio'),
|
||||
cfg.StrOpt('glance_server_ip',
|
||||
default=None,
|
||||
help='FusionSphere glance server ip'),
|
||||
cfg.StrOpt('uds_access_key',
|
||||
default=None,
|
||||
help='FusionCompute uds image access key',
|
||||
secret=True),
|
||||
cfg.StrOpt('uds_secret_key',
|
||||
default=None,
|
||||
help='FusionCompute uds image secret key',
|
||||
secret=True),
|
||||
cfg.StrOpt('instance_initial_mode',
|
||||
default='FusionCompute',
|
||||
help='Instance initial mode '
|
||||
'which is cloud_init or FusionCompute'),
|
||||
cfg.IntOpt('resource_reduced_rate',
|
||||
default=100,
|
||||
help='Resource reduced rate for '
|
||||
'cpu and memory in FusionCompute'),
|
||||
cfg.StrOpt('reserve_disk_symbol',
|
||||
default='True',
|
||||
help='Reserve b/c/d disk symbol in FusionCompute.'),
|
||||
cfg.StrOpt('fusioncompute_file_inject',
|
||||
default='enabled',
|
||||
help='Use file inject by FusionCompute'),
|
||||
cfg.IntOpt('cdrom_sequence_num',
|
||||
default=0,
|
||||
help='cdrom letter in FusionCompute'),
|
||||
cfg.IntOpt('cpu_usage_monitor_period',
|
||||
default=3600,
|
||||
help='FusionCompute cpu usage monitor period'),
|
||||
cfg.IntOpt('workers',
|
||||
default=4,
|
||||
help='FusionCompute compute process number'),
|
||||
cfg.IntOpt('fc_request_timeout_max',
|
||||
default=3600,
|
||||
help='FusionCompute request timeout max'),
|
||||
cfg.IntOpt('fc_request_timeout_min',
|
||||
default=300,
|
||||
help='FusionCompute request timeout min'),
|
||||
cfg.IntOpt('attach_int_timeout',
|
||||
default=90,
|
||||
help='Attach interface timeout'),
|
||||
cfg.IntOpt('safe_stop_vm_timeout',
|
||||
default=600,
|
||||
help='Safe stop vm timeout'),
|
||||
cfg.IntOpt('fc_request_timeout_delete_vm',
|
||||
default=36000,
|
||||
help='FusionCompute request timeout delete vm'),
|
||||
cfg.IntOpt('fc_request_timeout_delete_vm_timelimited',
|
||||
default=30000,
|
||||
help='FusionCompute request timeout delete vm'),
|
||||
cfg.BoolOpt('enable_virtualio',
|
||||
default=False,
|
||||
help='Vm virtual IO'),
|
||||
cfg.BoolOpt('enable_snapshot_auto_del',
|
||||
default=False,
|
||||
help='enable backup snapshot auto '
|
||||
'delete before volume detach'),
|
||||
cfg.StrOpt('host',
|
||||
default="",
|
||||
help='glance host'),
|
||||
cfg.StrOpt('port',
|
||||
default="",
|
||||
help='glance port')
|
||||
]
|
||||
|
||||
CONF.register_opts(fusion_compute_opts, group='fusioncompute')
|
||||
|
||||
virt_opts = [
|
||||
cfg.BoolOpt('use_kbox',
|
||||
default=False,
|
||||
help='use kbox in libvirt'),
|
||||
cfg.BoolOpt('local_resume_instance',
|
||||
default=True,
|
||||
help='Auto start the instance when stop itself')
|
||||
]
|
||||
|
||||
CONF.register_opts(virt_opts)
|
||||
|
||||
FC_DRIVER_JOINT_CFG = {
|
||||
'user_type': '2',
|
||||
'fc_port': 7443,
|
||||
'api_version': '6.0',
|
||||
'hypervisor_version': 60,
|
||||
'hypervisor_type': 'FusionCompute',
|
||||
'request_time_out': 120,
|
||||
'dvs_mapping': 'physnet1:service',
|
||||
'volume_is_thin': True
|
||||
}
|
||||
|
||||
|
||||
def str_to_bool(cver_str):
    """Convert a string (or bool) to a boolean.

    :param cver_str: value to convert; booleans pass through unchanged,
        strings must be 'true' or 'false' in any letter case
    :return: the converted boolean
    :raises ValueError: when the value is neither a bool nor a
        recognised boolean string
    """
    # NOTE: the original used types.BooleanType / types.StringType,
    # aliases that were removed in Python 3; the builtin types are
    # identical in Python 2 and portable to Python 3.
    if isinstance(cver_str, bool):
        return cver_str
    if isinstance(cver_str, str):
        bool_map = {'true': True, 'false': False}
        bool_str = cver_str.lower() if cver_str else ""
        if bool_str in bool_map:
            return bool_map[bool_str]
    raise ValueError('%s is not valid boolean.' % cver_str)
|
||||
|
||||
|
||||
class Enum(dict):
    """Dict subclass exposing its keys as attributes.

    Missing attributes resolve to None instead of raising
    AttributeError.
    """

    def __init__(self, **enums):
        super(Enum, self).__init__()
        self.update(enums)

    def __getattr__(self, name):
        # dict.get returns None for unknown keys, which is the
        # desired fallback for undefined enum members.
        return self.get(name)
|
||||
|
||||
# Enumeration of the value types a config item may hold; used by
# ConfigItemValue.set_value to pick the coercion applied to raw values.
CONFIG_ITEM_TYPE = Enum(STR=0,
                        INT=1,
                        BOOL=2)
|
||||
|
||||
# ignore pylint:disable=R0903
|
||||
|
||||
|
||||
class ConfigItemValue(object):
    """One FC config item: a key, its value and the value's type."""

    def __init__(self, key, value, conf_type=CONFIG_ITEM_TYPE.STR):
        # key under which the item appears in the config file
        self.key = key
        # current (default or parsed) value
        self.value = value
        # one of CONFIG_ITEM_TYPE; drives coercion in set_value
        self.conf_type = conf_type
        if conf_type is None:
            raise TypeError('item %s set value %s type eror.' % (key, value))

    def set_value(self, value):
        """Coerce and store value according to conf_type.

        On a failed int/bool conversion the previous value is kept
        and the failure is logged.
        """
        if self.conf_type == CONFIG_ITEM_TYPE.STR:
            self.value = str(value)
        elif self.conf_type == CONFIG_ITEM_TYPE.INT:
            try:
                self.value = int(value)
            except ValueError:
                LOG.error(_("%s config to int fail."), str(value))
        elif self.conf_type == CONFIG_ITEM_TYPE.BOOL:
            try:
                self.value = str_to_bool(value)
            except ValueError:
                LOG.error(_("%s config to bool fail."), str(value))
        else:
            # Unknown type marker: keep the old value and report it.
            LOG.error(_("config type %s is error."), self.conf_type)
|
||||
|
||||
|
||||
class FcConfig(dict):
    """FC config file manager.

    Loads a dict of default ConfigItemValue entries, then overrides
    them with values read from a JSON config file. Attribute access
    returns an item's value, or None for unknown names.
    """

    def __init__(self, cfg_path, default_value):
        """
        :param cfg_path: path of the JSON config file to read
        :param default_value: mapping of key -> ConfigItemValue defaults
        """
        super(FcConfig, self).__init__()
        for key, value in default_value.items():
            self[key] = value

        # NOTE: the original called json.load(open(cfg_path, 'r')) and
        # leaked the file handle; a context manager guarantees close.
        with open(cfg_path, 'r') as cfg_file:
            overrides = json.load(cfg_file)
        for key, value in overrides.items():
            if self.get(key):
                self[key].set_value(value)
            else:
                # Keys in the file but not in the defaults are ignored.
                LOG.error(_("%s not exists in config."), key)

    def __getattr__(self, name):
        """Return the configured value for ``name``, or None if absent."""
        if self.get(name):
            return self.get(name).value
        else:
            return None
|
||||
|
||||
# ================CONFIG constant begin========================
|
||||
FC_PLUG_CONFIG_PATH = '/etc/nova/fc-nova-compute'
|
||||
|
||||
TEMPLATE_VHD_SIZE = 1024
|
||||
TEMPLATE_VHD_FILE = '%s/template.vhd' % FC_PLUG_CONFIG_PATH
|
||||
|
||||
OS_CONFIG_FILE = '%s/huawei-os-config.conf' % FC_PLUG_CONFIG_PATH
|
||||
VIRTUAL_IO_OS_CONFIG_FILE = '%s/virtualio-os-config.conf' % FC_PLUG_CONFIG_PATH
|
||||
|
||||
FC_DRIVER_DEFAULT_CFG = {
|
||||
'fc_user': ConfigItemValue('fc_user', ''),
|
||||
'fc_pwd': ConfigItemValue('fc_pwd', ''),
|
||||
'fc_ip': ConfigItemValue('fc_ip', None),
|
||||
'user_type': ConfigItemValue('user_type', 2, CONFIG_ITEM_TYPE.INT),
|
||||
'fc_port': ConfigItemValue('fc_port', 7443, CONFIG_ITEM_TYPE.INT),
|
||||
'api_version': ConfigItemValue('api_version', '6.0'),
|
||||
'hypervisor_version': ConfigItemValue('hypervisor_version', 60,
|
||||
CONFIG_ITEM_TYPE.INT),
|
||||
'hypervisor_type': ConfigItemValue('hypervisor_type', 'FusionCompute'),
|
||||
'fc_image_path': ConfigItemValue('fc_image_path', None),
|
||||
'dvs_vxlan': ConfigItemValue('dvs_vxlan', None),
|
||||
'cluster': ConfigItemValue('cluster', None),
|
||||
'dvs_mapping': ConfigItemValue('dvs_mapping', 'physnet1:service'),
|
||||
'request_time_out': ConfigItemValue('request_time_out', 120,
|
||||
CONFIG_ITEM_TYPE.INT),
|
||||
'gen_admin_pass': ConfigItemValue('gen_admin_pass', False,
|
||||
CONFIG_ITEM_TYPE.BOOL),
|
||||
'volume_is_thin': ConfigItemValue('volume_is_thin', True,
|
||||
CONFIG_ITEM_TYPE.BOOL),
|
||||
'clusters': ConfigItemValue('clusters', ''),
|
||||
'cpu_rate': ConfigItemValue('cpu_rate', 1, CONFIG_ITEM_TYPE.INT),
|
||||
'glance_server_ip': ConfigItemValue('glance_server_ip', None),
|
||||
'uds_access_key': ConfigItemValue('uds_access_key', None),
|
||||
'uds_secret_key': ConfigItemValue('uds_secret_key', None),
|
||||
'cpu_usage_monitor_period': ConfigItemValue('cpu_usage_monitor_period',
|
||||
60 * 60, CONFIG_ITEM_TYPE.INT)
|
||||
}
|
||||
|
||||
FC_PLUG_CONFIG_FILE = '%s/fc-nova-compute.conf' % FC_PLUG_CONFIG_PATH
|
||||
"""FC_CONF = FcConfig(FC_PLUG_CONFIG_FILE, FC_DRIVER_DEFAULT_CFG)
|
||||
|
||||
"""
|
||||
|
||||
|
||||
# ============================vm constant begin===============
|
||||
|
||||
VM_GROUP_FLAG = 'FSP'
|
||||
|
||||
VM_STATUS = Enum(UNKNOWN='unknown', RUNNING='running',
|
||||
STOPPED='stopped', STOPPING='stopping',
|
||||
PAUSED='pause', SUSPENDED='hibernated',
|
||||
MIGRATING='migrating',
|
||||
FAULTRESUMING='fault-resuming')
|
||||
|
||||
NOTIFY_NEUTRON = Enum(FALSE='false',
|
||||
TRUE='true')
|
||||
|
||||
PV_STATUS = Enum(STARTING='starting', RUNNING='running',
|
||||
NOTRUNNING='notRunning')
|
||||
|
||||
VM_POWER_STATE_MAPPING = {
|
||||
VM_STATUS.UNKNOWN: power_state.NOSTATE,
|
||||
VM_STATUS.RUNNING: power_state.RUNNING,
|
||||
VM_STATUS.PAUSED: power_state.PAUSED,
|
||||
VM_STATUS.STOPPING: power_state.SHUTDOWN,
|
||||
VM_STATUS.STOPPED: power_state.SHUTDOWN,
|
||||
VM_STATUS.SUSPENDED: power_state.SUSPENDED
|
||||
}
|
||||
|
||||
FC_RETURN_ERROR = "FusionCompute return failed."
|
||||
|
||||
REBOOT_TYPE = Enum(SOFT='SOFT', HARD='HARD')
|
||||
FC_REBOOT_TYPE = {
|
||||
REBOOT_TYPE.HARD: 'force',
|
||||
REBOOT_TYPE.SOFT: 'safe'
|
||||
}
|
||||
|
||||
HUAWEI_OS_TYPE = '__os_type'
|
||||
HUAWEI_OS_VERSION = '__os_version'
|
||||
HUAWEI_IMAGE_LOCATION = '__image_location'
|
||||
HUAWEI_IMAGE_TYPE = '__image_source_type'
|
||||
HUAWEI_IS_LINK_CLONE = '__linked_clone'
|
||||
|
||||
HUAWEI_OS_TYPE_MAP = {
|
||||
'windows': 'Windows',
|
||||
'linux': 'Linux',
|
||||
'other': 'Other'
|
||||
}
|
||||
|
||||
DEFAULT_HUAWEI_OS_TYPE = 'Other'
|
||||
DEFAULT_HUAWEI_OS_VERSION = 'Other(32 bit)'
|
||||
|
||||
# ComputeOps._init_os_config() will do real initialization
|
||||
DEFAULT_HUAWEI_OS_CONFIG = ['', '']
|
||||
|
||||
HUAWEI_OS_VERSION_INT = osconfig.OS_VERSION_INT
|
||||
HUAWEI_OS_VERSION_STR = osconfig.OS_VERSION_STR
|
||||
HUAWEI_VIRTUAL_IO_OS_VERSION_INT = osconfig.VIRTUAL_IO_OS_VERSION_INT
|
||||
VIRTUAL_IO_OS_LIST = []
|
||||
|
||||
BOOT_OPTION_MAP = {
|
||||
'hd': 'disk',
|
||||
'hd,network': 'disk',
|
||||
'network': 'pxe',
|
||||
'network,hd': 'pxe',
|
||||
'default': 'disk'
|
||||
}
|
||||
|
||||
VNC_KEY_MAP_SETTING = {
|
||||
'en-us': 7,
|
||||
'de': 4,
|
||||
'fr': 12,
|
||||
'ru': 30,
|
||||
'es': 8,
|
||||
'default': 7
|
||||
}
|
||||
|
||||
IPV4_VERSION = 4
|
||||
|
||||
DRS_RULES_TYPE_MAP = {
|
||||
'affinity': 1,
|
||||
'anti-affinity': 2
|
||||
}
|
||||
|
||||
DRS_RULES_OP_TYPE_MAP = {
|
||||
'delete': 0,
|
||||
'modify': 1,
|
||||
'create': 2
|
||||
}
|
||||
|
||||
# =================uri constant begin================
|
||||
VM_URI_MAP = {
|
||||
'start': '/action/start',
|
||||
'stop': '/action/stop',
|
||||
'reboot': '/action/reboot',
|
||||
'pause': '/action/pause',
|
||||
'unpause': '/action/resume',
|
||||
'import': '/action/import',
|
||||
'export': '/action/export',
|
||||
'unresume': '/action/unresume',
|
||||
'migrate': '/action/migrate',
|
||||
'clone': '/action/clone',
|
||||
'set_vm_data': '/action/uploadVmData',
|
||||
'attachvol': '/action/attachvol',
|
||||
'detachvol': '/action/detachvol',
|
||||
'expandvol': '/action/expandvol',
|
||||
'suspend': '/action/hibernate',
|
||||
'attach_gpu': '/action/attachgpu',
|
||||
'detach_gpu': '/action/detachgpu',
|
||||
'nics': '/virtualNics'
|
||||
}
|
||||
|
||||
VOL_URI_MAP = {
|
||||
'modio': '/modifyIOpropertyOfVolume'
|
||||
}
|
||||
|
||||
FC_SITE_URI_MAP = {
|
||||
'vm_uri': {
|
||||
'baseuri': '%(site_uri)s/vms'
|
||||
},
|
||||
|
||||
'import_vm_uri': {
|
||||
'baseuri': '%(vm_uri)s/action/import',
|
||||
'dependuri': ['vm_uri']
|
||||
},
|
||||
'cluster_uri': {
|
||||
'baseuri': '%(site_uri)s/clusters'
|
||||
},
|
||||
'host_uri': {
|
||||
'baseuri': '%(site_uri)s/hosts'
|
||||
},
|
||||
'datastore_uri': {
|
||||
'baseuri': '%(site_uri)s/datastores'
|
||||
},
|
||||
'volume_uri': {
|
||||
'baseuri': '%(site_uri)s/volumes'
|
||||
},
|
||||
'dvswitchs_uri': {
|
||||
'baseuri': '%(site_uri)s/dvswitchs'
|
||||
},
|
||||
'current_time_uri': {
|
||||
'baseuri': '%(site_uri)s/monitors/getSysCurrentTime'
|
||||
},
|
||||
'metric_curvedata_uri': {
|
||||
'baseuri': '%(site_uri)s/monitors/objectmetric-curvedata'
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
TOKEN_URI = '/service/session'
|
||||
SITE_URI = '/service/sites'
|
||||
|
||||
# =================network====================
|
||||
TYPE_FNC = 2
|
||||
TYPE_VLAN = 'vlan'
|
||||
TYPE_VXLAN = 'vxlan'
|
||||
TYPE_FLAT = 'flat'
|
||||
|
||||
DVSWITCHS = 'dvSwitchs'
|
||||
DVS_URI = '/dvswitchs'
|
||||
PORT_GROUP_URI = DVS_URI + '/%(dvs_id)s/portgroups'
|
||||
PORT_GROUP_ID_URI = PORT_GROUP_URI + '/%(pg_id)s'
|
||||
VSP_URI = DVS_URI + '/%(dvs_id)s/vsps'
|
||||
|
||||
VSP_TAG_KEY = 'NeutronPort'
|
||||
|
||||
# =================other====================
|
||||
ID_IN_URN_REGEX = re.compile(r':(?P<id>[^:]+)$')
|
||||
CPU_QOS_NOVA_KEY = ['quota:cpu_shares',
|
||||
'quota:cpu_limit',
|
||||
'quota:cpu_reserve']
|
||||
CPU_QOS_FC_KEY = ['weight',
|
||||
'limit',
|
||||
'reservation']
|
||||
CPU_QOS_FC_DEFAULT_VALUE = [1000, 0, 0]
|
||||
|
||||
MOUNT_DEVICE_SEQNUM_MAP = {
|
||||
'/dev/sda': 1, '/dev/vda': 1, '/dev/xvda': 1,
|
||||
'/dev/sdb': 2, '/dev/vdb': 2, '/dev/xvdb': 2,
|
||||
'/dev/sdc': 3, '/dev/vdc': 3, '/dev/xvdc': 3,
|
||||
'/dev/sdd': 4, '/dev/vdd': 4, '/dev/xvdd': 4,
|
||||
'/dev/sde': 5, '/dev/vde': 5, '/dev/xvde': 5,
|
||||
'/dev/sdf': 6, '/dev/vdf': 6, '/dev/xvdf': 6,
|
||||
'/dev/sdg': 7, '/dev/vdg': 7, '/dev/xvdg': 7,
|
||||
'/dev/sdh': 8, '/dev/vdh': 8, '/dev/xvdh': 8,
|
||||
'/dev/sdi': 9, '/dev/vdi': 9, '/dev/xvdi': 9,
|
||||
'/dev/sdj': 10, '/dev/vdj': 10, '/dev/xvdj': 10,
|
||||
'/dev/sdk': 11, '/dev/vdk': 11, '/dev/xvdk': 11,
|
||||
'/dev/sdl': 12, '/dev/vdl': 12, '/dev/xvdl': 12,
|
||||
'/dev/sdm': 13, '/dev/vdm': 13, '/dev/xvdm': 13,
|
||||
'/dev/sdn': 14, '/dev/vdn': 14, '/dev/xvdn': 14,
|
||||
'/dev/sdo': 15, '/dev/vdo': 15, '/dev/xvdo': 15,
|
||||
'/dev/sdp': 16, '/dev/vdp': 16, '/dev/xvdp': 16,
|
||||
'/dev/sdq': 17, '/dev/vdq': 17, '/dev/xvdq': 17,
|
||||
'/dev/sdr': 18, '/dev/vdr': 18, '/dev/xvdr': 18,
|
||||
'/dev/sds': 19, '/dev/vds': 19, '/dev/xvds': 19,
|
||||
'/dev/sdt': 20, '/dev/vdt': 20, '/dev/xvdt': 20,
|
||||
'/dev/sdu': 21, '/dev/vdu': 21, '/dev/xvdu': 21,
|
||||
'/dev/sdv': 22, '/dev/vdv': 22, '/dev/xvdv': 22,
|
||||
'/dev/sdw': 23, '/dev/vdw': 23, '/dev/xvdw': 23,
|
||||
'/dev/sdx': 24, '/dev/vdx': 24, '/dev/xvdx': 24,
|
||||
'/dev/sdy': 25, '/dev/vdy': 25, '/dev/xvdy': 25,
|
||||
'/dev/sdz': 26, '/dev/vdz': 26, '/dev/xvdz': 26,
|
||||
'/dev/sdaa': 27, '/dev/vdaa': 27, '/dev/xvdaa': 27,
|
||||
'/dev/sdab': 28, '/dev/vdab': 28, '/dev/xvdab': 28,
|
||||
'/dev/sdac': 29, '/dev/vdac': 29, '/dev/xvdac': 29,
|
||||
'/dev/sdad': 30, '/dev/vdad': 30, '/dev/xvdad': 30,
|
||||
'/dev/sdae': 31, '/dev/vdae': 31, '/dev/xvdae': 31,
|
||||
'/dev/sdaf': 32, '/dev/vdaf': 32, '/dev/xvdaf': 32,
|
||||
'/dev/sdag': 33, '/dev/vdag': 33, '/dev/xvdag': 33,
|
||||
'/dev/sdah': 34, '/dev/vdah': 34, '/dev/xvdah': 34,
|
||||
'/dev/sdai': 35, '/dev/vdai': 35, '/dev/xvdai': 35,
|
||||
'/dev/sdaj': 36, '/dev/vdaj': 36, '/dev/xvdaj': 36,
|
||||
'/dev/sdak': 37, '/dev/vdak': 37, '/dev/xvdak': 37,
|
||||
'/dev/sdal': 38, '/dev/vdal': 38, '/dev/xvdal': 38,
|
||||
'/dev/sdam': 39, '/dev/vdam': 39, '/dev/xvdam': 39,
|
||||
'/dev/sdan': 40, '/dev/vdan': 40, '/dev/xvdan': 40,
|
||||
'/dev/sdao': 41, '/dev/vdao': 41, '/dev/xvdao': 41,
|
||||
'/dev/sdap': 42, '/dev/vdap': 42, '/dev/xvdap': 42,
|
||||
'/dev/sdaq': 43, '/dev/vdaq': 43, '/dev/xvdaq': 43,
|
||||
'/dev/sdar': 44, '/dev/vdar': 44, '/dev/xvdar': 44,
|
||||
'/dev/sdas': 45, '/dev/vdas': 45, '/dev/xvdas': 45,
|
||||
'/dev/sdat': 46, '/dev/vdat': 46, '/dev/xvdat': 46,
|
||||
'/dev/sdau': 47, '/dev/vdau': 47, '/dev/xvdau': 47,
|
||||
'/dev/sdav': 48, '/dev/vdav': 48, '/dev/xvdav': 48,
|
||||
'/dev/sdaw': 49, '/dev/vdaw': 49, '/dev/xvdaw': 49,
|
||||
'/dev/sdax': 50, '/dev/vdax': 50, '/dev/xvdax': 50,
|
||||
'/dev/sday': 51, '/dev/vday': 51, '/dev/xvday': 51,
|
||||
'/dev/sdaz': 52, '/dev/vdaz': 52, '/dev/xvdaz': 52,
|
||||
'/dev/sdba': 53, '/dev/vdba': 53, '/dev/xvdba': 53,
|
||||
'/dev/sdbb': 54, '/dev/vdbb': 54, '/dev/xvdbb': 54,
|
||||
'/dev/sdbc': 55, '/dev/vdbc': 55, '/dev/xvdbc': 55,
|
||||
'/dev/sdbd': 56, '/dev/vdbd': 56, '/dev/xvdbd': 56,
|
||||
'/dev/sdbe': 57, '/dev/vdbe': 57, '/dev/xvdbe': 57,
|
||||
'/dev/sdbf': 58, '/dev/vdbf': 58, '/dev/xvdbf': 58,
|
||||
'/dev/sdbg': 59, '/dev/vdbg': 59, '/dev/xvdbg': 59,
|
||||
'/dev/sdbh': 60, '/dev/vdbh': 60, '/dev/xvdbh': 60,
|
||||
'/dev/sdbi': 61, '/dev/vdbi': 61, '/dev/xvdbi': 61,
|
||||
'/dev/sdbj': 62, '/dev/vdbj': 62, '/dev/xvdbj': 62,
|
||||
'/dev/sdbk': 63, '/dev/vdbk': 63, '/dev/xvdbk': 63,
|
||||
'/dev/sdbl': 64, '/dev/vdbl': 64, '/dev/xvdbl': 64,
|
||||
'/dev/sdbm': 65, '/dev/vdbm': 65, '/dev/xvdbm': 65
|
||||
}
|
||||
|
||||
MOUNT_DEVICE_SEQNUM_MAP_IDE = {
|
||||
'/dev/sda': 1, '/dev/vda': 1, '/dev/xvda': 1,
|
||||
'/dev/sdb': 1001, '/dev/vdb': 1001, '/dev/xvdb': 1001,
|
||||
'/dev/sdc': 1002, '/dev/vdc': 1002, '/dev/xvdc': 1002,
|
||||
'/dev/sdd': 1003, '/dev/vdd': 1003, '/dev/xvdd': 1003,
|
||||
'/dev/sde': 2, '/dev/vde': 2, '/dev/xvde': 2,
|
||||
'/dev/sdf': 3, '/dev/vdf': 3, '/dev/xvdf': 3,
|
||||
'/dev/sdg': 4, '/dev/vdg': 4, '/dev/xvdg': 4,
|
||||
'/dev/sdh': 5, '/dev/vdh': 5, '/dev/xvdh': 5,
|
||||
'/dev/sdi': 6, '/dev/vdi': 6, '/dev/xvdi': 6,
|
||||
'/dev/sdj': 7, '/dev/vdj': 7, '/dev/xvdj': 7,
|
||||
'/dev/sdk': 8, '/dev/vdk': 8, '/dev/xvdk': 8,
|
||||
'/dev/sdl': 9, '/dev/vdl': 9, '/dev/xvdl': 9,
|
||||
'/dev/sdm': 10, '/dev/vdm': 10, '/dev/xvdm': 10,
|
||||
'/dev/sdn': 11, '/dev/vdn': 11, '/dev/xvdn': 11,
|
||||
'/dev/sdo': 12, '/dev/vdo': 12, '/dev/xvdo': 12,
|
||||
'/dev/sdp': 13, '/dev/vdp': 13, '/dev/xvdp': 13,
|
||||
'/dev/sdq': 14, '/dev/vdq': 14, '/dev/xvdq': 14,
|
||||
'/dev/sdr': 15, '/dev/vdr': 15, '/dev/xvdr': 15,
|
||||
'/dev/sds': 16, '/dev/vds': 16, '/dev/xvds': 16,
|
||||
'/dev/sdt': 17, '/dev/vdt': 17, '/dev/xvdt': 17,
|
||||
'/dev/sdu': 18, '/dev/vdu': 18, '/dev/xvdu': 18,
|
||||
'/dev/sdv': 19, '/dev/vdv': 19, '/dev/xvdv': 19,
|
||||
'/dev/sdw': 20, '/dev/vdw': 20, '/dev/xvdw': 20,
|
||||
'/dev/sdx': 21, '/dev/vdx': 21, '/dev/xvdx': 21,
|
||||
'/dev/sdy': 22, '/dev/vdy': 22, '/dev/xvdy': 22,
|
||||
'/dev/sdz': 1004, '/dev/vdz': 1004, '/dev/xvdz': 1004,
|
||||
'/dev/sdaa': 23, '/dev/vdaa': 23, '/dev/xvdaa': 23,
|
||||
'/dev/sdab': 24, '/dev/vdab': 24, '/dev/xvdab': 24,
|
||||
'/dev/sdac': 25, '/dev/vdac': 25, '/dev/xvdac': 25,
|
||||
'/dev/sdad': 26, '/dev/vdad': 26, '/dev/xvdad': 26,
|
||||
'/dev/sdae': 27, '/dev/vdae': 27, '/dev/xvdae': 27,
|
||||
'/dev/sdaf': 28, '/dev/vdaf': 28, '/dev/xvdaf': 28,
|
||||
'/dev/sdag': 29, '/dev/vdag': 29, '/dev/xvdag': 29,
|
||||
'/dev/sdah': 30, '/dev/vdah': 30, '/dev/xvdah': 30,
|
||||
'/dev/sdai': 31, '/dev/vdai': 31, '/dev/xvdai': 31,
|
||||
'/dev/sdaj': 32, '/dev/vdaj': 32, '/dev/xvdaj': 32,
|
||||
'/dev/sdak': 33, '/dev/vdak': 33, '/dev/xvdak': 33,
|
||||
'/dev/sdal': 34, '/dev/vdal': 34, '/dev/xvdal': 34,
|
||||
'/dev/sdam': 35, '/dev/vdam': 35, '/dev/xvdam': 35,
|
||||
'/dev/sdan': 36, '/dev/vdan': 36, '/dev/xvdan': 36,
|
||||
'/dev/sdao': 37, '/dev/vdao': 37, '/dev/xvdao': 37,
|
||||
'/dev/sdap': 38, '/dev/vdap': 38, '/dev/xvdap': 38,
|
||||
'/dev/sdaq': 39, '/dev/vdaq': 39, '/dev/xvdaq': 39,
|
||||
'/dev/sdar': 40, '/dev/vdar': 40, '/dev/xvdar': 40,
|
||||
'/dev/sdas': 41, '/dev/vdas': 41, '/dev/xvdas': 41,
|
||||
'/dev/sdat': 42, '/dev/vdat': 42, '/dev/xvdat': 42,
|
||||
'/dev/sdau': 43, '/dev/vdau': 43, '/dev/xvdau': 43,
|
||||
'/dev/sdav': 44, '/dev/vdav': 44, '/dev/xvdav': 44,
|
||||
'/dev/sdaw': 45, '/dev/vdaw': 45, '/dev/xvdaw': 45,
|
||||
'/dev/sdax': 46, '/dev/vdax': 46, '/dev/xvdax': 46,
|
||||
'/dev/sday': 47, '/dev/vday': 47, '/dev/xvday': 47,
|
||||
'/dev/sdaz': 48, '/dev/vdaz': 48, '/dev/xvdaz': 48,
|
||||
'/dev/sdba': 49, '/dev/vdba': 49, '/dev/xvdba': 49,
|
||||
'/dev/sdbb': 50, '/dev/vdbb': 50, '/dev/xvdbb': 50,
|
||||
'/dev/sdbc': 51, '/dev/vdbc': 51, '/dev/xvdbc': 51,
|
||||
'/dev/sdbd': 52, '/dev/vdbd': 52, '/dev/xvdbd': 52,
|
||||
'/dev/sdbe': 53, '/dev/vdbe': 53, '/dev/xvdbe': 53,
|
||||
'/dev/sdbf': 54, '/dev/vdbf': 54, '/dev/xvdbf': 54,
|
||||
'/dev/sdbg': 55, '/dev/vdbg': 55, '/dev/xvdbg': 55,
|
||||
'/dev/sdbh': 56, '/dev/vdbh': 56, '/dev/xvdbh': 56,
|
||||
'/dev/sdbi': 57, '/dev/vdbi': 57, '/dev/xvdbi': 57,
|
||||
'/dev/sdbj': 58, '/dev/vdbj': 58, '/dev/xvdbj': 58,
|
||||
'/dev/sdbk': 59, '/dev/vdbk': 59, '/dev/xvdbk': 59,
|
||||
'/dev/sdbl': 60, '/dev/vdbl': 60, '/dev/xvdbl': 60,
|
||||
'/dev/sdbm': 61, '/dev/vdbm': 61, '/dev/xvdbm': 61
|
||||
}
|
||||
|
||||
FUSIONCOMPUTE_MAX_VOLUME_NUM = 11
|
|
@ -0,0 +1,955 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import inspect
|
||||
import time
|
||||
|
||||
from nova import context as nova_context
|
||||
from nova import exception as nova_exc
|
||||
from nova.i18n import _
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from nova import objects
|
||||
from nova.virt import driver as compute_driver
|
||||
from nova.virt.fusioncomputeapi import cluster as fc_cluster
|
||||
from nova.virt.fusioncomputeapi import computeops
|
||||
from nova.virt.fusioncomputeapi import constant
|
||||
from nova.virt.fusioncomputeapi.fcclient import FCBaseClient
|
||||
from nova.virt.fusioncomputeapi.fcinstance import FC_INSTANCE_MANAGER as FC_MGR
|
||||
from nova.virt.fusioncomputeapi import networkops
|
||||
from nova.virt.fusioncomputeapi import taskops
|
||||
from nova.virt.fusioncomputeapi import utils
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
from nova.virt.fusioncomputeapi import volumeops
|
||||
|
||||
|
||||
class FusionComputeDriver(compute_driver.ComputeDriver):
|
||||
"""FusionComputeDriver: for OpenStack Manager"""
|
||||
|
||||
def __init__(self, virtapi):
    """Create the FC REST client and wire up all operation helpers.

    :param virtapi: nova VirtAPI instance passed to the base driver
    """
    LOG.info(_('begin to init FusionComputeDriver ...'))
    super(FusionComputeDriver, self).__init__(virtapi)

    # REST client for the FusionCompute management endpoint, built
    # from the [fusioncompute] config group plus fixed joint settings.
    self._client = FCBaseClient(
        constant.CONF.fusioncompute.fc_ip,
        constant.CONF.fusioncompute.fc_user,
        constant.CONF.fusioncompute.fc_pwd,
        constant.FC_DRIVER_JOINT_CFG['user_type'],
        ssl=True,
        port=constant.FC_DRIVER_JOINT_CFG['fc_port'],
        api_version=constant.FC_DRIVER_JOINT_CFG['api_version'],
        request_time_out=constant.FC_DRIVER_JOINT_CFG['request_time_out'])
    self._client.set_default_site()

    # task ops is need by other ops, init it first
    self.task_ops = taskops.TaskOperation(self._client)
    FC_MGR.set_client(self._client)

    # Higher-level operation helpers all share the same client/task ops.
    self.network_ops = networkops.NetworkOps(self._client, self.task_ops)
    self.volume_ops = volumeops.VolumeOps(self._client, self.task_ops)
    self.cluster_ops = fc_cluster.ClusterOps(self._client, self.task_ops)
    self.compute_ops = computeops.ComputeOps(self._client, self.task_ops,
                                             self.network_ops,
                                             self.volume_ops,
                                             self.cluster_ops)
|
||||
|
||||
def _list_all_clusters(self):
    """Fetch every cluster known to FC (delegates to cluster ops)."""
    LOG.debug(_("_list_all_clusters"))
    return self.cluster_ops.list_all_clusters()
|
||||
|
||||
def is_fc_up(self):
    """Report whether FusionCompute is reachable and has clusters.

    :return: True when the cluster list call succeeds and returns a
        non-empty list, False otherwise (failures are logged).
    """
    LOG.debug(_("is_fc_up"))
    try:
        clusters = self._list_all_clusters()
    except Exception as ex:
        LOG.error(_("is_fc_up %s") % ex)
        return False

    if clusters is None:
        LOG.error(_("is_fc_up clusters is None"))
        return False
    if not clusters:
        LOG.error(_("len clusters is zero"))
        return False
    return True
|
||||
|
||||
def init_host(self, host):
    """FC driver init goes here"""
    # No per-host initialization is needed; everything is set up
    # in __init__.
    pass
|
||||
|
||||
def get_info(self, instance):
    """Return the current status of an instance, looked up by uuid.

    :param instance: nova instance object
    :return: status info as produced by compute_ops.get_info
    """
    return self.compute_ops.get_info(instance)
|
||||
|
||||
def get_instance_extra_specs(self, instance):
    """Look up the flavor extra_specs for an instance.

    :param instance: nova instance (dict-like) with 'instance_type_id'
    :return: the flavor's extra_specs dict, or {} when absent
    """
    admin_ctxt = nova_context.get_admin_context(read_deleted='yes')
    # ignore pylint:disable=E1101
    flavor = objects.Flavor.get_by_id(admin_ctxt,
                                      instance['instance_type_id'])
    return flavor.get('extra_specs', {})
|
||||
|
||||
def get_resource_group_list(self):
    """Collect the resource groups of every cluster this service manages.

    :return: list of resource groups aggregated across all nodes
    """
    groups = []
    # ``or []`` covers the case where no nodes are available.
    for node in self.get_available_nodes() or []:
        cluster_urn = self.cluster_ops.get_cluster_urn_by_nodename(node)
        groups.extend(
            self.cluster_ops.get_resource_group_list(cluster_urn))
    return groups
|
||||
|
||||
def delete_resource_group(self, resource_group_urn):
    """Delete an instance (affinity) group on FC.

    :param resource_group_urn: URN identifying the group to delete
    """
    self.cluster_ops.delete_resource_group(resource_group_urn)
|
||||
|
||||
@utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_max)
def _get_instances_info(self):
    """Fetch info for all instances from FC (time-limited query).

    :return: instance info from compute_ops
    """
    return self.compute_ops.get_instances_info()
|
||||
|
||||
def get_instances_info(self):
    """Get info for every instance on FusionCompute.

    :return: instance info mapping, or {} on error / missing data
    """
    LOG.debug(_("get_instances_info"))
    try:
        instances = self._get_instances_info()
    except Exception as ex:
        # Lazy formatting instead of eager ``%`` string building.
        LOG.error(_("get_instances_info: %s"), ex)
        return {}
    # None is normalized to an empty mapping for callers.
    return instances if instances is not None else {}
|
||||
|
||||
def get_instance_disk_info(self, instance_name,
                           block_device_info=None):
    """Retrieve information about actual disk sizes of an instance.

    :param instance_name: nova instance name (as from list_instances())
    :param block_device_info: optional; can filter out devices that
        are actually volumes
    :return: list of dicts in the driver-API disk-info shape
        (path/type/virt_disk_size/backing_file/disk_size/
        over_committed_disk_size); FC reports nothing here, so a
        single empty dict is returned.
    """
    return [{}]
|
||||
|
||||
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Create a VM on FusionCompute.

    :param context: security context
    :param instance: nova instance to create
    :param image_meta: image metadata
    :param injected_files: files to inject into the guest
    :param admin_password: admin password (injected only when enabled)
    :param network_info: instance network info
    :param block_device_info: instance volume block device info
    """
    # @utils.func_log_circle(instance)
    def _create_vm():
        """Inner helper: resolve extra specs and create the VM on FC."""
        extra_specs = self.get_instance_extra_specs(instance)
        LOG.debug(_("extra_specs is %s."), jsonutils.dumps(extra_specs))

        # Only hand the admin password to FC when configured to do so.
        if constant.CONF.fusioncompute.use_admin_pass:
            vm_password = admin_password
        else:
            vm_password = None

        self.compute_ops.create_vm(context, instance, network_info,
                                   block_device_info,
                                   image_meta, injected_files,
                                   vm_password, extra_specs)

    _create_vm()
|
||||
|
||||
def power_off(
        self,
        instance,
        timeout=0,
        retry_interval=0,
        forceStop=False):
    """Power off the specified instance.

    :param instance: nova.objects.instance.Instance
    :param timeout: accepted for driver-API compatibility (unused)
    :param retry_interval: accepted for driver-API compatibility (unused)
    :param forceStop: when True, request a hard stop on FC
    """
    @utils.func_log_circle(instance, nova_exc.InstanceFaultRollback)
    def _stop_vm():
        """Inner helper: stop the VM on FC."""
        self.compute_ops.stop_vm(instance, forceStop)

    _stop_vm()
|
||||
|
||||
def power_on(self, context, instance, network_info,
             block_device_info=None):
    """Power on the specified instance.

    :param instance: nova.objects.instance.Instance
    """
    @utils.func_log_circle(instance)
    def _start_vm():
        """Inner helper: start the VM on FC."""
        self.compute_ops.start_vm(instance, block_device_info)

    _start_vm()
|
||||
|
||||
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot an instance; failures are logged but not propagated."""
    @utils.func_log_circle(instance)
    def _reboot_vm_fc():
        """Inner helper: delegate the reboot to compute_ops."""
        try:
            self.compute_ops.reboot_vm(
                instance, reboot_type, block_device_info)
        except Exception as ex:
            # NOTE(review): the failure is deliberately swallowed here
            # (best-effort reboot) -- confirm callers don't rely on an
            # exception being raised.
            LOG.error(_("reboot_vm exception: %s") % ex)

    _reboot_vm_fc()
|
||||
|
||||
def cleanup(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None, destroy_vifs=True):
    """Cleanup instance resources; nothing to do, FC handles it."""
    pass
|
||||
|
||||
def destroy(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None):
    """Destroy the VM; FC itself cleans up networks and disks."""
    @utils.func_log_circle(instance)
    def _delete_vm():
        """Inner helper: delete the VM on FC."""
        self.compute_ops.delete_vm(context, instance,
                                   block_device_info=block_device_info,
                                   destroy_disks=destroy_disks)

    _delete_vm()
|
||||
|
||||
def pause(self, instance):
    """Pause the specified instance.

    :param instance: nova.objects.instance.Instance
    """
    @utils.func_log_circle(instance)
    def _pause_vm():
        """Inner helper: pause the VM on FC."""
        self.compute_ops.pause_vm(instance)

    _pause_vm()
|
||||
|
||||
def unpause(self, instance):
    """Unpause a paused instance.

    :param instance: nova.objects.instance.Instance
    """
    @utils.func_log_circle(instance)
    def _unpause_vm():
        """Inner helper: unpause the VM on FC."""
        self.compute_ops.unpause_vm(instance)

    _unpause_vm()
|
||||
|
||||
def suspend(self, context, instance):
    """Suspend an instance.

    :param instance: nova.objects.instance.Instance
    """
    @utils.func_log_circle(instance, nova_exc.InstanceFaultRollback)
    def _suspend_vm():
        """Inner helper: suspend the VM on FC."""
        self.compute_ops.suspend_vm(instance)

    _suspend_vm()
|
||||
|
||||
def resume(self, context, instance, network_info, block_device_info=None):
    """Resume a suspended instance.

    On FC, resume is the same operation as starting the VM.

    :param context: the context for the resume
    :param instance: nova.objects.instance.Instance being resumed
    :param network_info: instance network information
    :param block_device_info: instance volume block device info
    """
    @utils.func_log_circle(instance)
    def _resume_vm():
        """Inner helper: start the VM (FC's resume)."""
        self.compute_ops.start_vm(instance, block_device_info)

    _resume_vm()
|
||||
|
||||
def change_instance_metadata(self, context, instance, diff):
    """Apply a metadata change to an FC instance.

    :param context: security context (unused by the FC call)
    :param instance: nova instance whose metadata changed
    :param diff: metadata diff -- NOTE(review): not forwarded to FC;
        the ops layer presumably re-reads the metadata itself. Confirm.
    """
    @utils.func_log_circle(instance)
    def _change_instance_metadata():
        """Inner helper: push the metadata change to FC."""
        self.compute_ops.change_instance_metadata(instance)

    _change_instance_metadata()
|
||||
|
||||
def change_instance_info(self, context, instance):
    """Push updated instance info to FC.

    :param context: security context (unused by the FC call)
    :param instance: nova instance to update
    """
    @utils.func_log_circle(instance)
    def _change_instance_info():
        """Inner helper: forward the update to compute_ops."""
        self.compute_ops.change_instance_info(instance)

    _change_instance_info()
|
||||
|
||||
def resume_state_on_host_boot(self, context, instance, network_info,
                              block_device_info=None):
    """Resume guest state when a host is booted.

    FC performs HA automatically; this override only exists to avoid
    NotImplementedError noise in nova-compute.log.

    :param instance: nova.objects.instance.Instance
    """
    pass
|
||||
|
||||
def confirm_migration(self, migration, instance, network_info):
    """Confirm a resize, destroying the source VM.

    :param instance: nova.objects.instance.Instance
    """
    @utils.func_log_circle(instance, nova_exc.InstanceFaultRollback)
    def _confirm_migration():
        """Inner helper: confirm the migration on FC."""
        self.compute_ops.confirm_migration(instance)

    _confirm_migration()
|
||||
|
||||
def pre_live_migration(self, ctxt, instance, block_device_info,
                       network_info, disk_info, migrate_data=None):
    """Prepare an instance for live migration; a no-op on FC."""
    pass
|
||||
|
||||
def check_can_live_migrate_destination(self, context, instance,
                                       src_compute_info, dst_compute_info,
                                       block_migration=False,
                                       disk_over_commit=False):
    """Check whether live migration to this host is possible.

    FC performs no destination-side checks, so an empty migration-info
    dict is always returned.

    :param context: security context
    :param instance: nova.db.sqlalchemy.models.Instance
    :param src_compute_info: info about the sending machine
    :param dst_compute_info: info about the receiving machine
    :param block_migration: if true, prepare for block migration
    :param disk_over_commit: if true, allow disk over commit
    :returns: dict containing migration info (hypervisor-dependent)
    """
    return {}
|
||||
|
||||
def check_can_live_migrate_destination_cleanup(self, context,
                                               dest_check_data):
    """Cleanup after check_can_live_migrate_destination; no-op on FC.

    :param context: security context
    :param dest_check_data: result of check_can_live_migrate_destination
    """
    pass
|
||||
|
||||
def check_can_live_migrate_source(self, context, instance,
                                  dest_check_data, block_device_info=None):
    """Source-side live-migration feasibility check; no checks on FC.

    :param context: security context
    :param instance: nova.db.sqlalchemy.models.Instance
    :param dest_check_data: result of check_can_live_migrate_destination
    :param block_device_info: result of _get_instance_block_device_info
    :returns: dict containing migration info (hypervisor-dependent)
    """
    return {}
|
||||
|
||||
def ensure_filtering_rules_for_instance(self, instance, network_info):
    """Set up filtering rules before migration; a no-op on FC.

    Other hypervisors must install hypervisor/firewall filtering rules
    on the destination before migration starts; FC handles this itself,
    so the driver has nothing to do here.

    :param instance: nova.objects.instance.Instance object
    """
    pass
|
||||
|
||||
def unfilter_instance(self, instance, network_info):
    """Stop filtering the instance; a no-op on FC."""
    pass
|
||||
|
||||
# ignore pylint:disable=W0613
def live_migration(self, context, instance_ref, dest,
                   post_method, recover_method, block_migration=False,
                   migrate_data=None):
    """Live-migrate an instance to another host via FC."""
    @utils.func_log_circle(instance_ref)
    def _live_migration():
        """Inner helper: delegate the live migration to compute_ops."""
        self.compute_ops.live_migration(
            context,
            instance_ref,
            dest,
            post_method,
            recover_method,
            block_migration,
            migrate_data)

    _live_migration()
|
||||
|
||||
def post_live_migration(self, ctxt, instance_ref, block_device_info,
                        migrate_data=None):
    """Post operation of live migration at the source host; no-op on FC."""
    pass
|
||||
|
||||
def post_live_migration_at_destination(self, context, instance,
                                       network_info,
                                       block_migration=False,
                                       block_device_info=None):
    """Post operation of live migration at the destination host."""
    def _post_live_migration_at_destination():
        """Inner helper: notify FC about the migrated instance."""
        self.compute_ops.post_live_migration_at_destination(instance)

    _post_live_migration_at_destination()
|
||||
|
||||
def post_live_migration_at_source(self, context, instance, network_info):
    """Unplug VIFs from networks at source; a no-op on FC.

    :param context: security context
    :param instance: instance object reference
    :param network_info: instance network information
    """
    pass
|
||||
|
||||
def rollback_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info,
                                           block_device_info,
                                           destroy_disks=True,
                                           migrate_data=None):
    """Clean up the destination after a failed live migration; no-op on FC."""
    pass
|
||||
|
||||
def get_volume_connector(self, instance):
    """Return the connector info Cinder needs to attach volumes here."""
    return {'ip': constant.CONF.my_ip,
            'host': constant.CONF.host}
|
||||
|
||||
def instance_exists(self, instance):
    """Return True if FC knows a VM matching this instance's uuid."""
    try:
        FC_MGR.get_vm_by_uuid(instance)
    except nova_exc.InstanceNotFound:
        return False
    return True
|
||||
|
||||
def get_available_resource(self, nodename):
    """Retrieve resource info for a node.

    Called when nova-compute launches and by a periodic task.

    :param nodename: node to describe
    :returns: dictionary describing resources
    """
    return self.cluster_ops.get_available_resource(nodename)
|
||||
|
||||
def get_host_stats(self, refresh=False):
    """Return currently known host stats, one entry per available node."""
    nodes = self.get_available_nodes_without_exception(refresh=refresh)
    return [self.get_available_resource(node) for node in nodes]
|
||||
|
||||
def node_is_available(self, nodename):
    """Return whether this compute service manages a particular node."""
    if nodename in self.get_available_nodes_without_exception():
        return True
    # Not in the cached list -- refresh once and check again.
    return nodename in self.get_available_nodes_without_exception(
        refresh=True)
|
||||
|
||||
def get_host_ip_addr(self):
    """Retrieve the IP address of the dom0.

    Implemented as a no-op only to avoid NotImplementedError.
    """
    pass
|
||||
|
||||
@utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_min)
def _get_available_nodes(self, refresh=True):
    """Return nodenames of all nodes managed by the compute service.

    :param refresh: when True (default), re-query FC so the cached
        resource list is up to date before returning it
    :raises: whatever cluster_ops.update_resources raises on failure
    """
    LOG.debug(_("_get_available_nodes"))
    # Default is refresh, to ensure the returned list is the latest.
    if refresh:
        try:
            self.cluster_ops.update_resources()
        except Exception:
            # LOG.exception records the traceback; a bare ``raise``
            # (unlike the original ``raise ex``) re-raises with the
            # original traceback intact.
            LOG.exception(_("get clusters from fc exception"))
            raise

    node_list = self.cluster_ops.resources
    LOG.debug(_("_get_available_nodes: %s"), node_list)
    return node_list
|
||||
|
||||
def get_available_nodes(self, refresh=True):
    """Return nodenames of all nodes managed by the compute service.

    :raises HypervisorUnavailable: when no node list could be obtained
    """
    LOG.debug(_("get_available_nodes"))

    node_list = self._get_available_nodes(refresh)

    # NOTE(review): None presumably means the timelimited call was cut
    # short rather than raising -- confirm against utils.timelimited.
    if node_list is None:
        raise nova_exc.HypervisorUnavailable(host='fc-nova-compute')
    return node_list
|
||||
|
||||
def get_available_nodes_without_exception(self, refresh=True):
    """Return nodenames of all managed nodes, or [] on any failure.

    :param refresh: forwarded to _get_available_nodes
    """
    # The original debug/error messages said "get_available_nodes"
    # (copy/paste slip); they now name this method.
    LOG.debug(_("get_available_nodes_without_exception"))
    try:
        node_list = self._get_available_nodes(refresh)
    except Exception as ex:
        LOG.error(_("get_available_nodes_without_exception: %s"), ex)
        return []
    return node_list if node_list is not None else []
|
||||
|
||||
def get_hypervisor_version(self):
    """Return the hypervisor version reported by FC."""
    return self.cluster_ops.get_hypervisor_version()
|
||||
|
||||
def get_hypervisor_type(self):
    """Return the hypervisor type reported by FC."""
    return self.cluster_ops.get_hypervisor_type()
|
||||
|
||||
def get_instance_capabilities(self):
    """Return the instance capabilities reported by FC."""
    return self.cluster_ops.get_instance_capabilities()
|
||||
|
||||
@utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_min)
def _list_instances(self):
    """Fetch all FC instances (time-limited FC query)."""
    LOG.debug(_("_list_instances"))
    return self.compute_ops.list_all_fc_instance()
|
||||
|
||||
def list_instances(self):
    """Return the names of all FC instances, or [] on failure."""
    LOG.debug(_("list_instances"))
    try:
        instances = self._list_instances()
    except Exception as ex:
        # The original handler logged "The available nodes are: %s" --
        # a copy/paste slip from get_available_nodes; also promote the
        # failure from debug to error, since callers see an empty list.
        LOG.error(_("list_instances failed: %s"), ex)
        return []
    if instances is None:
        LOG.error(_("instances is None"))
        return []
    return [vm['name'] for vm in instances]
|
||||
|
||||
@utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_min)
def _list_instance_uuids(self):
    """Fetch all FC instances for uuid extraction (time-limited)."""
    return self.compute_ops.list_all_fc_instance()
|
||||
|
||||
def list_instance_uuids(self):
    """Return the uuids of all FC instances, or [] on failure."""
    try:
        fc_instances = self._list_instance_uuids()
    except Exception as ex:
        LOG.error(_("list_instance_uuids: %s") % ex)
        return []
    if fc_instances is None:
        LOG.error(_("fc_instances is None"))
        return []
    return [vm['uuid'] for vm in fc_instances]
|
||||
|
||||
def get_vnc_console(self, context, instance):
    """Get connection info for a VNC console.

    The password is included only when invoked via
    manager.get_vnc_console; when invoked via
    manager.validate_console_port it is omitted.

    :param instance: nova.objects.instance.Instance
    """
    # NOTE(review): caller detection via inspect.stack() is fragile --
    # it breaks if the manager call path is renamed or wrapped.
    stack_list = inspect.stack()
    get_opt = str(stack_list[1][3]) == "get_vnc_console"

    return self.compute_ops.get_vnc_console(instance, get_opt)
|
||||
|
||||
def attach_interface(self, instance, image_meta, vif):
    """Attach a network interface to an FC virtual machine.

    iNIC network interfaces are not considered here.

    :param instance: nova instance
    :param image_meta: image metadata (accepted for API compatibility)
    :param vif: the virtual interface to attach
    :return: result of compute_ops.attach_interface
    :raises: whatever the inner attach raises, after logging
    """
    @utils.func_log_circle(instance)
    @utils.timelimited(constant.CONF.fusioncompute.attach_int_timeout)
    def attach_intf_inner():
        """Inner helper: resolve extra specs and attach the interface."""
        extra_specs = self.get_instance_extra_specs(instance)
        return self.compute_ops.attach_interface(
            instance, vif, extra_specs)

    try:
        return attach_intf_inner()
    except Exception as ex:
        LOG.error("Exception %s", ex)
        # Bare ``raise`` preserves the original traceback; the previous
        # ``raise ex`` rebound it at this frame.
        raise
|
||||
|
||||
def detach_interface(self, instance, vif):
    """Detach a network interface from an FC virtual machine.

    If the NIC no longer exists, no exception is raised.

    :param instance: nova instance
    :param vif: the virtual interface to detach
    :return: result of compute_ops.detach_interface
    """
    @utils.func_log_circle(instance)
    def detach_intf_inner():
        """Inner helper: detach the interface on FC."""
        return self.compute_ops.detach_interface(instance, vif)

    return detach_intf_inner()
|
||||
|
||||
def migrate_disk_and_power_off(self, context, instance, dest, flavor,
                               network_info, block_device_info=None,
                               timeout=0, retry_interval=0):
    """Transfer the disk of a running instance, powering it off at the end.

    :param instance: nova.objects.instance.Instance
    """
    @utils.func_log_circle(instance, nova_exc.InstanceFaultRollback)
    def _migrate_disk_and_power_off():
        """Inner helper: perform the FC-side migrate + power off."""
        self.compute_ops.migrate_disk_and_power_off(
            instance, dest, flavor, block_device_info)

    _migrate_disk_and_power_off()
|
||||
|
||||
def finish_migration(self, context, migration, instance, disk_info,
                     network_info, image_meta, resize_instance,
                     block_device_info=None, power_on=True):
    """Complete a resize/migration on the destination host.

    Only instance, power_on and block_device_info are consumed by the
    FC call; the remaining driver-API parameters are accepted for
    interface compatibility.

    :param context: the context for the migration/resize
    :param migration: the migrate/resize information
    :param instance: nova.objects.instance.Instance being migrated
    :param disk_info: the newly transferred disk information
    :param network_info: instance network information
    :param image_meta: glance image metadata for the source image
    :param resize_instance: True if the instance is being resized
    :param block_device_info: instance volume block device info
    :param power_on: True if the instance should be powered on
    """
    @utils.func_log_circle(instance)
    def _finish_migration():
        """Inner helper: finish the migration on FC."""
        self.compute_ops.finish_migration(
            instance, power_on, block_device_info)

    _finish_migration()
|
||||
|
||||
def finish_revert_migration(self, context, instance, network_info,
                            block_device_info=None, power_on=True):
    """Finish reverting a resize on FC.

    :param context: the context for the finish_revert_migration
    :param instance: nova.objects.instance.Instance being reverted
    :param network_info: instance network information
    :param block_device_info: instance volume block device info
    :param power_on: True if the instance should be powered on
    """
    @utils.func_log_circle(instance)
    def _finish_revert_migration():
        """Inner helper: revert the migration on FC."""
        self.compute_ops.finish_revert_migration(
            instance, power_on, block_device_info)

    _finish_revert_migration()
|
||||
|
||||
def attach_volume(self, context, connection_info, instance, mountpoint,
                  disk_bus=None, device_type=None, encryption=None):
    """Attach a disk to the instance at mountpoint, retrying on failure."""
    @utils.func_log_circle(instance)
    def _attach_volume():
        """Retry the FC attach up to 8 times with growing back-off."""
        retry_num = 8
        for count in range(retry_num):
            try:
                LOG.info(_('Attach volume count is %s '), count + 1)
                self.compute_ops.attach_volume(connection_info,
                                               instance,
                                               mountpoint)
                LOG.info(_('Attach volume success.'))
                return
            except Exception:
                LOG.exception(_('Attach volume failed'))
                if count >= retry_num - 1:
                    # Bare ``raise`` keeps the original traceback
                    # (the previous ``raise ex`` rebound it here).
                    raise
                # Back-off grows: 10s, 20s, ... before the next try.
                time.sleep(10 + count * 10)

    _attach_volume()
|
||||
|
||||
def detach_volume(self, connection_info, instance, mountpoint,
                  encryption=None):
    """Detach the disk attached to the instance, retrying on failure."""
    @utils.func_log_circle(instance)
    def _detach_volume():
        """Retry the FC detach up to 8 times with growing back-off."""
        retry_num = 8
        for count in range(retry_num):
            try:
                LOG.info(_('Detach volume count is %s '), count + 1)
                self.compute_ops.detach_volume(connection_info, instance)
                LOG.info(_('Detach volume success.'))
                return
            except Exception:
                LOG.exception(_('Detach volume failed'))
                if count >= retry_num - 1:
                    # Bare ``raise`` keeps the original traceback
                    # (the previous ``raise ex`` rebound it here).
                    raise
                # Back-off grows: 10s, 20s, ... before the next try.
                time.sleep(10 + count * 10)

    _detach_volume()
|
||||
|
||||
def snapshot(self, context, instance, image_id, update_task_state):
    """Snapshot the specified instance.

    :param context: security context
    :param instance: Instance object as returned by DB layer
    :param image_id: reference to a pre-created image that will hold
        the snapshot
    :param update_task_state: callback to report snapshot progress
    """
    @utils.func_log_circle(instance)
    def _snapshot():
        """Inner helper: create the VM snapshot on FC."""
        self.compute_ops.snapshot(context, instance, image_id,
                                  update_task_state)

    _snapshot()
|
||||
|
||||
def report_instances_state(self, host):
    """Report instances state on compute start; a no-op on FC."""
    pass
|
||||
|
||||
def report_host_state(self, host):
    """Report host state on compute start; a no-op on FC."""
    pass
|
||||
|
||||
def get_pci_slots_from_xml(self, instance):
    """Return PCI slot info; FC exposes none, so always [].

    :param instance: nova instance (unused)
    :return: empty list
    """
    return []
|
||||
|
||||
def reconfigure_affinity_group(self, instances, affinity_group, action,
                               node=None):
    """Add VMs to, or remove VMs from, an affinity group.

    :param instances: instances to add/remove
    :param affinity_group: the target affinity group
    :param action: add or remove operation selector
    :param node: optional node scoping the operation
    """
    @utils.func_log_circle()
    def _reconfigure_affinity_group():
        """Inner helper: apply the group change on FC."""
        self.compute_ops.reconfigure_affinity_group(instances,
                                                    affinity_group,
                                                    action,
                                                    node)

    _reconfigure_affinity_group()
|
||||
|
||||
def clean_fc_network_pg(self):
    """Audit and clean up FC network port groups."""
    @utils.func_log_circle()
    def _clean_fc_network_pg():
        """Inner helper: run the port-group audit."""
        self.network_ops.audit_pg()

    _clean_fc_network_pg()
|
|
@ -0,0 +1,166 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Exception types raised by the FusionCompute driver."""

from nova import exception as nova_exc
from nova.i18n import _


class RequestError(nova_exc.Invalid):
    """An FC REST request failed."""

    msg_fmt = _("FC request error: %(reason)s, errorcode: %(error_code)s.")


class TimeoutError(nova_exc.Invalid):
    """An FC REST request timed out."""

    msg_fmt = _("Request timeout: %(reason)s, errorcode: %(error_code)s.")


class NoAvailableSite(nova_exc.NotFound):
    """No FC site could be selected."""

    msg_fmt = _("No available site found.")


class DVSwitchNotFound(nova_exc.NotFound):
    """The requested distributed virtual switch does not exist."""

    msg_fmt = _("DVS %(dvs_id)s could not be found.")


class VSPNotFound(nova_exc.NotFound):
    """The requested virtual switch port does not exist."""

    msg_fmt = _("VSP %(vsp_id)s could not be found")


class ClusterNotFound(nova_exc.InvalidHypervisorType):
    """The requested FC cluster does not exist."""

    msg_fmt = _("Cluster %(cluster_name)s could not be found")


class ModifyClusterFailure(nova_exc.NovaException):
    """Modifying an FC cluster failed."""

    msg_fmt = _("Failed to modify cluster: %(reason)s")


class InstancePauseFailure(nova_exc.InstanceInvalidState):
    """Pausing an instance failed."""

    msg_fmt = _("Failed to pause instance: %(reason)s")


class InstanceUnpauseFailure(nova_exc.InstanceInvalidState):
    """Unpausing an instance failed."""

    msg_fmt = _("Failed to unpause instance: %(reason)s")


class InstanceSuspendFailure(nova_exc.InstanceInvalidState):
    """Suspending an instance failed."""

    msg_fmt = _("Failed to suspend instance: %(reason)s")


class InstanceResumeFailure(nova_exc.InstanceInvalidState):
    """Resuming an instance failed."""

    msg_fmt = _("Failed to resume instance: %(reason)s")


class InstanceCloneFailure(nova_exc.InstanceInvalidState):
    """Cloning an instance failed."""

    msg_fmt = _("Failed to clone instance: %(reason)s")


class InstanceModifyFailure(nova_exc.InstanceInvalidState):
    """Modifying an instance failed."""

    msg_fmt = _("Failed to modify instance: %(reason)s")


class InstanceExpandvolFailure(nova_exc.InstanceInvalidState):
    """Expanding an instance volume failed."""

    msg_fmt = _("Failed to expand instance volume: %(reason)s")


class InstanceAttachvolFailure(nova_exc.InstanceInvalidState):
    """Attaching a volume to an instance failed."""

    msg_fmt = _("Failed to attach instance volume: %(reason)s")


class InstanceDetachvolFailure(nova_exc.InstanceInvalidState):
    """Detaching a volume from an instance failed."""

    msg_fmt = _("Failed to detach instance volume: %(reason)s")


class VolumeDeleteFailure(nova_exc.DiskNotFound):
    """Deleting a volume failed."""

    msg_fmt = _("Failed to delete volume: %(reason)s")


class InvalidOsOption(nova_exc.InvalidRequest):
    """The requested OS type/version combination is not valid."""

    msg_fmt = _("Invalid os type or os version")


class ImageTooLarge(nova_exc.InvalidRequest):
    """The target disk is smaller than the source image."""

    msg_fmt = _("Disk size is smaller than image size.")


class ImageCreateFailure(nova_exc.NovaException):
    """Creating an image failed."""

    msg_fmt = _("Failed to create image: %(reason)s")


class InvalidImageDir(nova_exc.NovaException):
    """The configured image path is not valid."""

    msg_fmt = _("Invalid image path.")


class InvalidCustomizationInfo(nova_exc.NovaException):
    """Guest customization info is malformed."""

    msg_fmt = _("Invalid customization info: %(reason)s.")


class FusionComputeReturnException(nova_exc.ConfigDriveInvalidValue):
    """FC returned an error response."""

    msg_fmt = _("FusionCompute exception occurred, %(reason)s.")


class FusionComputeTaskException(nova_exc.Invalid):
    """An asynchronous FC task failed."""

    msg_fmt = _("FC task exception: %(reason)s.")


class SetQosIoFailure(nova_exc.Invalid):
    """Setting QoS IO limits failed."""

    msg_fmt = _("Failed to set qos io: %(reason)s")


class AffinityGroupException(nova_exc.NovaException):
    """Configuring an affinity group failed."""

    msg_fmt = _("Config affinity group exception: %(reason)s")


class InstanceNameInvalid(nova_exc.Invalid):
    """The instance name does not satisfy FC naming rules."""

    msg_fmt = _("Instance name is invalid")


class InvalidUdsImageInfo(nova_exc.Invalid):
    """UDS image metadata is malformed."""

    msg_fmt = _("Invalid Uds Image info: %(reason)s.")


class InvalidGlanceImageInfo(nova_exc.Invalid):
    """Glance image metadata is malformed."""

    msg_fmt = _("Invalid Glance Image info: %(reason)s.")


class InvalidFlavorExtraSpecInfo(nova_exc.Invalid):
    """Flavor extra-spec metadata is malformed."""

    msg_fmt = _("Invalid Flavor Extra Spec Info: %(reason)s.")
|
@ -0,0 +1,213 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.virt.fusioncomputeapi import constant
|
||||
from nova.virt.fusioncomputeapi import exception
|
||||
from nova.virt.fusioncomputeapi import restclient
|
||||
from nova.virt.fusioncomputeapi import utils
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
|
||||
|
||||
class FCBaseClient(restclient.RestClient):
    """REST client for the FusionCompute (FC) API.

    Adds token-based authentication (lazy fetch, retry-once on 401),
    default JSON headers and response decoding on top of the generic
    ``restclient.RestClient`` transport.
    """

    # HTTP status codes FC returns on success.
    STATUS_OK = [200, 201, 202]
    # Status codes meaning the auth token is missing or expired.
    STATUS_NO_AUTH = [401]
    # Status codes treated as request failures.
    STATUS_INVALID = [400, 403, 404, 500, 503]

    def __init__(self, host, user, key, user_type, api_version='6.0',
                 ssl=None, port=None, cert=None, request_time_out=120):
        # :param host: FC API host
        # :param user: FC user name (sent as X-Auth-User when fetching a token)
        # :param key: FC credential (sent as X-Auth-Key)
        # :param user_type: FC user type (sent as X-Auth-UserType)
        # :param api_version: FC API version embedded in the Accept header
        # :param ssl, port, cert: passed through to RestClient
        # :param request_time_out: default per-request timeout in seconds
        super(FCBaseClient, self).__init__(host, port=port, ssl=ssl, cert=cert)

        self.__user = user
        self.__key = key
        self.__user_type = user_type

        self.__api_version = api_version
        self.__accept = ('application/json;version=%s;charset=UTF-8' %
                         api_version)
        self.__content_type = 'application/json'
        self.__accept_language = 'en_US'

        self.__request_time_out = request_time_out
        # Cached auth token; fetched lazily by _update_and_get_headers().
        self.__token = None

        # Site-level context (site id/uri map) shared with the ops classes.
        self.context = FCClientContext(self)

    def _update_and_get_headers(self, headers, force_get_token):
        """update fc rest header and return headers

        Fetches a token if none is cached (or a refresh is forced), then
        merges the auth headers over the caller-supplied ones.

        :param headers: optional dict of caller headers (not mutated)
        :param force_get_token: when True, always refresh the token
        :return: merged header dict including X-Auth-Token
        """
        if not self.__token or force_get_token:
            self.get_token()
        if not headers:
            headers_res = self._make_headers(self.__token)
        else:
            # Copy so the caller's dict is left untouched.
            headers_res = headers.copy()
            headers_res.update(self._make_headers(self.__token))
        return headers_res

    def request_msg(self, method, path, data=None, headers=None, **kwargs):
        # Send a request to FC, transparently (re)authenticating.
        #
        # :param method: HTTP method name
        # :param path: request path
        # :param data: optional request body
        # :param headers: optional extra headers
        # :param kwargs: extra transport options; honors 'timeout' and
        #     'need_response' (return {'response', 'data'} instead of data)
        # :return: decoded JSON body (or raw text if decoding fails); with
        #     need_response=True, a dict with the response object as well
        req_headers = self._update_and_get_headers(headers, False)

        # set default request time out
        kwargs['timeout'] = kwargs.get('timeout', self.__request_time_out)
        rsp = self._request(method, path, data, headers=req_headers, **kwargs)

        # On 401, refresh the token once and retry the same request.
        if rsp.status_code in self.STATUS_NO_AUTH:
            LOG.info('token may expired, fetch again.')
            req_headers = self._update_and_get_headers(headers, True)
            rsp = self._request(method, path, data, headers=req_headers,
                                **kwargs)

        # catch message sending exception
        self._raise_if_not_in_status_ok(rsp)
        ret_data = {'response': rsp, 'data': None}

        if rsp.text:
            try:
                ret_data['data'] = rsp.json()
            # ignore pylint:disable=W0703
            except Exception as excp:
                # Non-JSON body: log and fall back to the raw text.
                LOG.warn(_('failed to loads json response data, %s'), excp)
                ret_data['data'] = rsp.text

        if kwargs.get('need_response', False):
            return ret_data
        return ret_data['data']

    def _raise_if_not_in_status_ok(self, rsp):
        """if response is not normal,rasise exception

        :param rsp: response object from the transport layer
        :raises exception.RequestError: when status is not in STATUS_OK;
            carries FC's errorDes/errorCode when the body is parseable
        :return: None
        """
        if rsp.status_code not in self.STATUS_OK:
            error_info = {}
            try:
                error_info = rsp.json()
            # ignore pylint:disable=W0703
            except Exception as excp:
                # Best effort only: raise below even without error details.
                LOG.warn('try to get error response content failed: %s', excp)

            raise exception.RequestError(reason=error_info.get('errorDes'),
                                         error_code=error_info.get('errorCode')
                                         )

    def get_token(self):
        """Get token from FC

        Posts credentials to the token URI and caches the returned
        X-Auth-Token header for subsequent requests.

        :return None
        """
        response = self._request('post', constant.TOKEN_URI, data={},
                                 headers=self._make_headers())
        self.__token = response.headers['X-Auth-Token']

    def get_sites(self):
        """get fc default site info

        :return: decoded response of GET on the sites URI
        """
        return self.get(constant.SITE_URI)

    def get_first_site(self):
        # Return the first site reported by FC.
        #
        # :raises exception.NoAvailableSite: when FC reports no sites
        sites = self.get_sites()
        if not sites or not sites.get('sites'):
            raise exception.NoAvailableSite()
        return sites['sites'][0]

    def set_default_site(self):
        # Store the first available site into the client context so the
        # ops classes can build site-scoped paths.
        self.context.set_default_site(self.get_first_site())

    def _make_headers(self, token=None):
        """make token header info

        With a token, builds request headers carrying X-Auth-Token; without
        one, builds the credential headers used to obtain a token.

        :param token: cached auth token, or None during token acquisition
        :return: header dict
        """
        headers = {
            'Accept-Language': self.__accept_language,
            'Content-Type': self.__content_type,
            'Accept': self.__accept
        }

        if token:
            headers.update({
                'X-Auth-Token': token
            })
        else:
            headers.update({
                'X-Auth-User': self.__user,
                'X-Auth-Key': self.__key,
                'X-Auth-UserType': self.__user_type,
                'X-ENCRIPT-ALGORITHM': '1'
            })
        return headers
|
||||
|
||||
|
||||
class FCClientContext(dict):
    """fc base info

    Dict holding site-level data (site, site_id, site_uri) with attribute
    access: missing attributes fall back to dict keys, then to the site
    uri map, then to None.
    """

    def __init__(self, client):
        super(FCClientContext, self).__init__()
        # Owning FC client instance.
        self.client = client
        # Populated by set_default_site(); maps names to site-scoped uris.
        self.site_uri_map = None

    def __getattr__(self, name):
        """if dict has attr,return dict ,else return site uri info

        Fix: use a containment check instead of truthiness so stored
        falsy values (0, '', False, empty containers) are returned
        instead of silently falling through to the uri map / None.

        :param name: attribute name being resolved
        :return: the stored value, a uri from the site map, or None
        """
        if name in self:
            return self[name]
        if self.site_uri_map:
            return utils.get_fc_uri(name, self.site_uri_map)
        return None

    def set_default_site(self, site):
        """set default site infos

        Derives site_id from the site urn and builds the site base uri.

        :param site: site dict from FC; must contain 'urn'
        :return: None
        """
        self['site'] = site
        self['site_id'] = utils.get_id_from_urn(self['site']['urn'])
        self['site_uri'] = '/'.join([constant.SITE_URI, self['site_id']])

        self.site_uri_map = {'site_uri': self['site_uri']}

    def get_path_by_site(self, path='', **kwargs):
        """Connect your path with default site path, and format args value

        :param path: in format like '/resource/%(id)s/action/%(other)s';
            a list of fragments is concatenated first
        :param kwargs: Dictionary args, matched path format, like
            (id=id_value, other=other_value)
        :return: path like
            '/service/sites/site_id/resource/id_value/action/other_value'
        """
        if not kwargs:
            kwargs = {}

        if isinstance(path, list):
            path = ''.join(path)

        return ''.join([self['site_uri'], path % kwargs])
|
|
@ -0,0 +1,220 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import time
|
||||
|
||||
|
||||
from nova.compute import power_state
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
|
||||
|
||||
from nova.virt.fusioncomputeapi import constant
|
||||
from nova.virt.fusioncomputeapi import ops_base
|
||||
from nova.virt.fusioncomputeapi import utils
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
|
||||
|
||||
class FCInstance(dict):
    """fc vm class

    Dict wrapper for a FusionCompute VM record providing attribute-style
    access to its keys.
    """

    def __init__(self, ini_dict):
        # Shallow-copy the given mapping into this dict.
        super(FCInstance, self).__init__()
        for key in ini_dict:
            self[key] = ini_dict[key]

    def get_vm_action_uri(self, action):
        """get_vm_action_uri

        return fc vms uri info
        :param action: key into constant.VM_URI_MAP
        :return: this VM's 'uri' value with the mapped action suffix
        """
        return self.uri + constant.VM_URI_MAP[action]

    def __getattr__(self, name):
        # Missing attributes resolve to dict entries (None when absent).
        return self.get(name)
|
||||
|
||||
|
||||
class FCInstanceOps(ops_base.OpsBase):
    """fc instances manager

    Query helpers for FusionCompute VMs, built on the site-scoped REST
    helpers of ops_base.OpsBase.
    """

    def _query_vm(self, **kwargs):
        """Query VMs.

        :param kwargs:
            name: VM name
            status: VM status
            scope: VM in certain scope
            (also accepts limit/offset/detail paging params)
        :return: list of VMs
        """
        return self.get(utils.build_uri_with_params(self.site.vm_uri, kwargs))

    def _get_fc_vm(self, vm_info, limit=1, offset=0, detail=2, **kwargs):
        """get fv vm info by conditions

        :param vm_info: identifier used only in logging / the raised error
        :param limit: page size (defaults to a single match)
        :param offset: page offset
        :param detail: FC detail level
        :param kwargs: filter conditions forwarded to _query_vm
        :raises exception.InstanceNotFound: when no VM matches
        :return: FCInstance wrapping the first match
        """
        instances = self._query_vm(limit=limit, offset=offset, detail=detail,
                                   **kwargs)
        if not instances or not instances['vms']:
            LOG.error(_("can not find instance %s."), vm_info)
            raise exception.InstanceNotFound(instance_id=vm_info)
        return FCInstance(instances['vms'][0])

    def get_vm_state(self, instance):
        """get_vm_state

        :param instance: openstack instance object
        :return: FC VM record (same as get_vm_by_uuid)
        """
        return self.get_vm_by_uuid(instance)

    def get_total_vm_numbers(self, **kwargs):
        """get_total_vm_numbers

        Get total numbers in fc
        :param kwargs: optional filter conditions
        :return: total VM count as int, 0 when none reported
        """
        instances = self._query_vm(limit=1, offset=0, detail=0, **kwargs)
        if not instances or not instances.get('total'):
            return 0
        total = int(instances.get('total'))
        LOG.info(_("total instance number is %d."), total)
        return total

    def get_all_vms_info(self, **kwargs):
        """get_all_vms_info

        Get all vms info by paging query
        :return: {uuid:state, ...} where uuid prefers the OpenStack
            externalUuid stored in VM params when present
        """

        states = {}

        limit = 100
        total = self.get_total_vm_numbers(**kwargs)
        while len(states) < total:
            # Track progress to detect a page that adds nothing (guards
            # against an infinite loop on duplicate/overlapping pages).
            last_total = len(states)
            instances = self._query_vm(limit=limit, offset=len(states),
                                       detail=2, **kwargs)
            for instance in instances.get('vms'):
                # Prefer the OpenStack uuid recorded in params.externalUuid.
                if instance.get('params') is not None and instance.get(
                        'params').get("externalUuid") is not None:
                    states[
                        instance["params"]['externalUuid']] \
                        = constant.VM_POWER_STATE_MAPPING.get(
                        instance['status'], power_state.NOSTATE)
                else:
                    states[instance['uuid']] \
                        = constant.VM_POWER_STATE_MAPPING.get(
                        instance['status'], power_state.NOSTATE)
            if len(instances.get('vms')) < limit:
                # Short page: no more results.
                break
            if last_total == len(states):
                break
            # Brief pause between pages to avoid hammering FC.
            time.sleep(0.005)
        return states

    def get_all_vms(self, **kwargs):
        """get_all_vms

        Get all vms by paging query
        Here only return at most 100 vms to avoid timeout in db query
        :return: list of VM dicts; 'uuid' is rewritten to the OpenStack
            externalUuid when one is recorded in the VM params
        """

        instances = []
        total = self.get_total_vm_numbers(**kwargs)
        while len(instances) < total:
            paging_instances = self._query_vm(limit=100, offset=len(instances),
                                              detail=2, **kwargs)
            instances += paging_instances.get('vms')
            # Intentional: stop after the first page (see docstring).
            break
        for instance in instances:
            if instance.get('params') is not None and instance.get(
                    'params').get("externalUuid") is not None:
                instance["uuid"] = instance["params"]['externalUuid']
        return instances

    def get_vm_by_uuid(self, instance):
        """get_vm_by_uuid

        get vm info by vm uuid
        :param instance: openstack vm info
        :return:inner vm info
        """

        # Fast path: a direct FC vm id ('i-' + 8 chars) may be cached in
        # the instance's system metadata; fall back to a uuid search on
        # any failure (best-effort by design).
        try:
            vm_id = instance.system_metadata.get('fc_vm_id')
            if vm_id and vm_id.startswith('i-') and (len(vm_id) == 10):
                instance = self.get('%s/%s' % (self.site.vm_uri, vm_id))
                return FCInstance(instance)
        except Exception:
            pass

        return self._get_fc_vm_by_uuid_and_external_uuid(
            instance['uuid'], externalUuid=instance['uuid'])

    def get_vm_by_id(self, vm_id):
        """get_vm_by_id

        :param vm_id: FC VM id
        :return: FCInstance for the matching VM
        """
        return self._get_fc_vm(vm_id, vmId=vm_id)

    def get_vm_by_name(self, instance_name):
        """get_vm_by_name

        # NOTE: this method is used for implementing
        # nova.virt.driver.ComputeDriver#instance_exists
        :param instance_name: VM name to search for
        :return: FCInstance for the matching VM
        """
        return self._get_fc_vm(instance_name, name=instance_name)

    def _get_fc_vm_by_uuid_and_external_uuid(
            self, vm_info, limit=1, offset=0, detail=2, **kwargs):
        """_get_fc_vm_by_uuid_and_external_uuid

        get fv vm info by conditions
        :param vm_info: uuid used for the fallback query and error report
        :param limit: page size
        :param offset: page offset
        :param detail: FC detail level
        :param kwargs: primary filter (typically externalUuid=...)
        :raises exception.InstanceNotFound: when neither query matches
        :return:vms[0]
        """
        # find vm by external_uuid or find vm by uuid for upgrade
        instances = self._query_vm(
            limit=limit,
            offset=offset,
            detail=detail,
            **kwargs)
        if not instances or not instances['vms']:
            # Fallback for pre-upgrade VMs recorded under their FC uuid.
            instances_by_uuids = self._query_vm(
                limit=limit, offset=offset, detail=detail, uuid=vm_info)
            if not instances_by_uuids or not instances_by_uuids['vms']:
                LOG.error(_("can not find instance %s."), vm_info)
                raise exception.InstanceNotFound(instance_id=vm_info)
            return FCInstance(instances_by_uuids['vms'][0])
        return FCInstance(instances['vms'][0])


# Module-level singleton; the FC client is attached later via set_client().
FC_INSTANCE_MANAGER = FCInstanceOps(None)
|
|
@ -0,0 +1,555 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
from neutronclient.common import exceptions as neutron_client_exc
|
||||
from nova import exception
|
||||
|
||||
|
||||
from nova import context as nova_ctxt
|
||||
from nova.i18n import _
|
||||
from nova.network.neutronv2 import api as neutron_api
|
||||
from nova.virt.fusioncomputeapi import constant
|
||||
from nova.virt.fusioncomputeapi import exception as fc_exc
|
||||
from nova.virt.fusioncomputeapi import ops_task_base
|
||||
from nova.virt.fusioncomputeapi import utils
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
|
||||
class PortGroupAdapter(dict):
    """Port group class

    Dict-shaped adapter translating a neutron network into the FC port
    group payload (vlan/vxlan id, checksum flag).
    """

    def _make_pg_name(self, network, dvs_id):
        """_make_pg_name

        create pg name info
        :param network: neutron network dict (uses 'name' and 'id')
        :param dvs_id: FC dvswitch id
        :return: '<name>#<network-id>#<dvs-id>'
        """
        return '#'.join([network['name'], network['id'], dvs_id])

    def _make_pg_name_suffix(self, network, dvs_id, suffix):
        """_make_pg_name_suffix

        create pg name info
        :param network: neutron network dict (uses 'name' and 'id')
        :param dvs_id: FC dvswitch id
        :param suffix: extra trailing component (e.g. 'checksum')
        :return: '<name>#<network-id>#<dvs-id>#<suffix>'
        """

        return '#'.join([network['name'], network['id'], dvs_id, suffix])

    def __init__(self, network):
        super(PortGroupAdapter, self).__init__()

        # Exactly one of vlanId/vxlanId is set depending on the neutron
        # provider network type; flat networks map to vlan 0.
        self['vlanId'] = None
        self['vxlanId'] = None
        self['isCalcTCPCheckSum'] = False
        if network['provider:network_type'] == constant.TYPE_VLAN:
            self['vlanId'] = network['provider:segmentation_id']
        elif network['provider:network_type'] == constant.TYPE_VXLAN:
            self['vxlanId'] = network['provider:segmentation_id']
        elif network['provider:network_type'] == constant.TYPE_FLAT:
            self['vlanId'] = 0

    def __getattr__(self, name):
        # Missing attributes resolve to dict entries (None when absent).
        return self.get(name)

    def to_json(self):
        """change dict to json format"""
        return jsonutils.dumps(self)
|
||||
|
||||
|
||||
class PortGroupQueryAdapter(PortGroupAdapter):
    """PortGroupQueryAdapter

    Query payload: carries the port group name(s) to look up.
    """

    def __init__(self, network, dvs_id):
        super(PortGroupQueryAdapter, self).__init__(network)
        self['names'] = [self._make_pg_name(network, dvs_id)]


class PortGroupSuffixQueryAdapter(PortGroupAdapter):
    """PortGroupQueryAdapter

    Query payload for suffixed (checksum-enabled) port groups.
    """

    def __init__(self, network, dvs_id, suffix):
        super(PortGroupSuffixQueryAdapter, self).__init__(network)

        self['names'] = [self._make_pg_name_suffix(network, dvs_id, suffix)]
        self['isCalcTCPCheckSum'] = True


class PortGroupCreateAdapter(PortGroupAdapter):
    """PortGroupCreateAdapter

    Create payload: carries the single port group name.
    """

    def __init__(self, network, dvs_id):
        super(PortGroupCreateAdapter, self).__init__(network)

        self['name'] = self._make_pg_name(network, dvs_id)


class PortGroupSuffixCreateAdapter(PortGroupAdapter):
    """PortGroupCreateAdapter

    Create payload for suffixed (checksum-enabled) port groups.
    """

    def __init__(self, network, dvs_id, suffix):
        super(PortGroupSuffixCreateAdapter, self).__init__(network)

        self['name'] = self._make_pg_name_suffix(network, dvs_id, suffix)
        self['isCalcTCPCheckSum'] = True
|
||||
|
||||
|
||||
class NetworkOps(ops_task_base.OpsTaskBase):
    """network operation class

    Manages FC dvswitch/port-group/vsp resources and their mapping to
    neutron networks.
    """

    def __init__(self, fc_client, task_ops):
        super(NetworkOps, self).__init__(fc_client, task_ops)
        self._neutron = neutron_api.API()
        # dvswitch name -> dvswitch id
        self.dvs_mapping = {}
        # physical network name -> [dvswitch ids]
        self.physnet_mapping = {}
        self._init_all_fc_dvs()

    def _get_dvs_id_by_dvs_name(self, dvs_name=None):
        """get dvswitch id from cache according to physical network name

        Refreshes the cache when the name is unknown or the cached
        dvswitch no longer exists on the hypervisor.

        :param dvs_name: dvswitch name, or None
        :return: dvswitch id or None
        """
        if dvs_name is None:
            return None

        LOG.debug(_("physnet_name is %s"), dvs_name)

        dvs_id = self.dvs_mapping.get(dvs_name)
        if not dvs_id:
            self._init_all_fc_dvs()
        else:
            if not self._is_dvs_in_hypervisor(dvs_id):
                self._init_all_fc_dvs()

        return self.dvs_mapping.get(dvs_name)

    def _get_dvs_id_by_physnet_name(self, physnet_name):
        # Resolve the first dvswitch id mapped to a physical network name,
        # refreshing the cache when stale; returns None when unmapped.
        if physnet_name is None:
            return None

        dvs_ids = self.physnet_mapping.get(physnet_name)

        if not dvs_ids:
            self._init_all_fc_dvs()
        else:
            if not self._is_dvs_in_hypervisor(dvs_ids[0]):
                self._init_all_fc_dvs()

        return self.physnet_mapping.get(
            physnet_name)[0] if self.physnet_mapping.get(physnet_name)\
            else None

    def _is_dvs_in_hypervisor(self, id):
        # Best-effort existence check: a dvswitch counts as present only
        # when FC returns a record containing 'urn'; any error means no.
        try:
            dvs = self.get('%s/%s' % (self.site.dvswitchs_uri, str(id)))
            if 'urn' not in dvs:
                return False
        except Exception:
            return False
        return True

    def _init_all_fc_dvs(self):
        """Send message to fc and get dvswitch info

        Rebuilds both dvs_mapping and physnet_mapping from FC.

        :raises fc_exc.DVSwitchNotFound: when FC reports no dvswitches
        :return:
        """
        LOG.debug("loading dvs mapping ")
        dvs_map_temp = {}
        physnet_map_temp = {}
        data = self.get(self.site.dvswitchs_uri)
        if not data.get(constant.DVSWITCHS):
            raise fc_exc.DVSwitchNotFound()

        dvs = data.get(constant.DVSWITCHS)
        if dvs and len(dvs) > 0:
            for dvswitch in dvs:
                dvs_id = utils.get_id_from_urn(dvswitch.get('urn'))
                dvs_map_temp[dvswitch["name"]] = dvs_id
                self.update_physnet_map(dvs_id, physnet_map_temp)

        LOG.debug(
            "init all fc dvs dvs map is %s, physnet map is %s",
            jsonutils.dumps(dvs_map_temp),
            jsonutils.dumps(physnet_map_temp))
        # Swap in the freshly built maps atomically (plain rebinding).
        self.dvs_mapping = dvs_map_temp
        self.physnet_mapping = physnet_map_temp

    def update_physnet_map(self, dvs_id, physnet_map_temp):
        # Add this dvswitch under its physical network name in the given
        # temp map, skipping the enhanced-network dvswitch.
        dvs = self.get('%s/%s' % (self.site.dvswitchs_uri, str(dvs_id)))
        # NOTE(review): dvs['params'] is dereferenced before the `if dvs:`
        # guard below — if FC can return an empty/params-less record this
        # raises; confirm against the FC API contract.
        dvs['params']['physnetName'] = dvs.get('name')
        if dvs:
            # leave out enhanced network dvs by name
            if dvs.get('name') == constant.CONF.\
                    fusioncompute.enhanced_network_dvs_name:
                return
            if dvs.get('params') and dvs.get('params').get('physnetName'):
                physnet_name = dvs.get('params').get('physnetName')
                if physnet_map_temp.get(physnet_name):
                    physnet_map_temp.get(physnet_name).append(dvs_id)
                else:
                    physnet_map_temp[physnet_name] = [dvs_id]

    def _get_network_from_neutron(self, context, network_info):
        """send message to neutron server to get network information

        :param context: request context
        :param network_info: dict with the neutron network 'id'
        :raises exception.NetworkNotFound: when neutron has no such network
        :return: network dict with 'label' aliased to 'name'
        """
        client = neutron_api.get_client(context)
        try:
            network = client.show_network(
                network_info['id']).get('network') or {}
        except neutron_client_exc.NetworkNotFoundClient:
            raise exception.NetworkNotFound(network_id=network_info['id'])
        network['label'] = network['name']
        return network

    def get_port_from_neutron_by_id(self, context, port_id):
        """get port info from neutron by port id

        :param context: request context
        :param port_id: neutron port id
        :return: port detail dict from neutron
        """
        return self._neutron.show_port(context, port_id)

    def get_subnet_from_neutron_by_id(self, context, subnet_id):
        """get subnet info from neutron by neutron id

        :param context: request context
        :param subnet_id: neutron subnet id
        :return: subnet dict or None
        """
        return self.get_subnet_by_id(context, subnet_id)

    def get_subnet_by_id(self, context, subnet_id):
        # List subnets filtered by id and return the exact match (or None).
        search_pots = {'id': subnet_id}
        data = neutron_api.get_client(context).list_subnets(**search_pots)
        ipam_subnets = data.get('subnets', [])
        result = None
        for subnet in ipam_subnets:
            if subnet_id == subnet['id']:
                result = subnet
                break
        return result

    def get_subnet_by_port_id(self, context, port_id):
        """get subnet form neutron by port id

        return port item 0 subnet info
        :param context: request context
        :param port_id: neutron port id
        :return: subnet dict of the port's first fixed ip, or None
        """
        port_detail = self.get_port_from_neutron_by_id(context, port_id)
        subnet_id = None
        if port_detail and port_detail.get("port"):
            port = port_detail.get("port")
            fixed_ips = port['fixed_ips']
            if fixed_ips:
                subnet_id = fixed_ips[0]['subnet_id']
        if subnet_id:
            return self.get_subnet_from_neutron_by_id(context, subnet_id)
        else:
            return None

    def is_enable_dhcp(self, context, port_id):
        """check if subnet is enable dhcp

        :param context: request context
        :param port_id: neutron port id
        :return: the subnet's enable_dhcp flag; False when no subnet found
        """
        subnet = self.get_subnet_by_port_id(context, port_id)
        if subnet:
            return subnet['enable_dhcp']
        else:
            return False

    def ensure_network(
            self,
            network_info,
            checksum_enable=False,
            extra_specs=None):
        """Ensure network resource on FC

        Finds or creates the FC port group matching the neutron network.

        :param network_info: network_info from nova, dictionary type
        :param checksum_enable: use the '#checksum'-suffixed port group
        :param extra_specs: flavor extra specs (for enhanced-vnic routing)
        :return: port group urn, or None when lookup/creation failed
        """
        # NOTE: physical network only visible to admin user

        context = nova_ctxt.get_admin_context()

        network = self._get_network_from_neutron(context, network_info)
        LOG.info(_('get network info from neutron: %s'), network)
        network_info['checksum_enable'] = checksum_enable
        dvs_id = self.get_dvs_id(extra_specs, network)
        if not dvs_id:
            raise fc_exc.DVSwitchNotFound(
                dvs_id=network['provider:physical_network'])

        if checksum_enable is True:
            pg_adpt = PortGroupSuffixQueryAdapter(network, dvs_id, 'checksum')
        else:
            pg_adpt = PortGroupQueryAdapter(network, dvs_id)

        pg_data = self.query_port_group(pg_adpt)
        if not pg_data:
            try:
                if checksum_enable is True:
                    pg_adpt = PortGroupSuffixCreateAdapter(
                        network, dvs_id, 'checksum')
                else:
                    pg_adpt = PortGroupCreateAdapter(network, dvs_id)

                pg_data = self.create_port_group(dvs_id, pg_adpt)
            except Exception as e:
                # race condition: another worker may have created it first,
                # so re-query instead of failing outright.
                LOG.warn(_('create pg failed (%s), will check it again'), e)
                pg_adpt = PortGroupQueryAdapter(network, dvs_id)
                pg_data = self.query_port_group(pg_adpt)

        return pg_data['urn'] if pg_data else None

    def get_dvs_id(self, extra_specs, network):
        # Choose a dvswitch for this network: enhanced-vnic flavors and
        # vxlan networks resolve by configured dvs name, otherwise by the
        # network's physical network name.
        dvs_name = None
        physnet_name = None
        if network.get('segments'):
            for segment in network['segments']:
                if segment['provider:network_type'] == constant.TYPE_VLAN:
                    LOG.info(
                        _('get dvs id which network is vlan with segments'))
                    physnet_name = segment['provider:physical_network']
                    # Promote the vlan segment's fields onto the network.
                    network.update(segment)
                    break
        else:
            if network['provider:network_type'] == constant.TYPE_VXLAN:
                if not constant.CONF.fusioncompute.vxlan_dvs_name \
                        and not self.is_instance_use_enhanced_network(
                            extra_specs):
                    raise fc_exc.DVSwitchNotFound(dvs_id='vxlan dvs name')
                dvs_name = constant.CONF.fusioncompute.vxlan_dvs_name
            else:
                LOG.info(
                    _('get dvs id which network is vlan without segments'))
                physnet_name = network['provider:physical_network']

        if self.is_instance_use_enhanced_network(extra_specs):
            # Enhanced vnic always overrides: requires its dedicated dvs.
            if not constant.CONF.fusioncompute.enhanced_network_dvs_name:
                raise fc_exc.DVSwitchNotFound(
                    dvs_id='enhanced network dvs name')
            dvs_name = constant.CONF.fusioncompute.enhanced_network_dvs_name

        if dvs_name:
            dvs_id = self._get_dvs_id_by_dvs_name(dvs_name)
        else:
            dvs_id = self._get_dvs_id_by_physnet_name(physnet_name)

        return dvs_id

    def is_instance_use_enhanced_network(self, instance_extra_specs):
        # True when the flavor's 'instance_vnic:type' is 'enhanced'
        # (case-insensitive).
        if instance_extra_specs:
            instance_vnic_type = instance_extra_specs.get('instance_vnic:type')
            if instance_vnic_type and instance_vnic_type.lower() == 'enhanced':
                return True
        return False

    def create_port_group(self, dvs_id, pg_adpt):
        """send message to fusion compute to create a port group

        :param dvs_id: dvswitch id owning the port group
        :param pg_adpt: PortGroup*CreateAdapter payload
        :return: FC response for the created port group
        """
        ret = self.post(self.get_path_by_site(constant.PORT_GROUP_URI,
                                              dvs_id=dvs_id),
                        data=pg_adpt.to_json())
        return ret

    def query_port_group(self, pg_adapter):
        """query_port_group

        :param pg_adapter: adapter carrying the 'names' to look up
        :return: first matching port group dict, or None
        """
        query_path = self.get_path_by_site('/portgroups')

        ret = self.post(query_path,
                        data=jsonutils.dumps({'names': pg_adapter.names}))

        return ret['portGroups'][0] if ret and ret.get('portGroups') else None

    def create_vsp(self, dvs_id, pg_urn, vif):
        """send message to fusion compute to create a vsp

        The vsp is named and tagged with the neutron port id so it can be
        found again via query_vsp().

        :param dvs_id: dvswitch id
        :param pg_urn: port group urn the vsp attaches to
        :param vif: nova vif dict (uses 'id')
        :return: FC response for the created vsp
        """
        vsp_path = self.get_path_by_site(constant.VSP_URI,
                                         dvs_id=dvs_id)
        port_id = vif['id']

        body = {
            'name': port_id,
            'portGroupUrn': pg_urn,
            'tags': [{'tagKey': constant.VSP_TAG_KEY, 'tagValue': port_id}]
        }

        ret = self.post(vsp_path, data=jsonutils.dumps(body))

        return ret

    def delete_vsps(self, vifs):
        """send message to fusion compute to delete vsp

        :param vifs: list of nova vif dicts
        :return:
        """
        # Resolve all vsps first so a missing one raises before any delete.
        vsps = [self.query_vsp(vif) for vif in vifs]

        for vsp in vsps:
            self.delete(vsp['uri'])

    def query_vsp(self, vif):
        """send message to fusion compute to query vsp information

        :param vif: nova vif dict (uses 'id')
        :raises fc_exc.VSPNotFound: when no vsp carries the port-id tag
        :return: first matching vsp dict
        """
        ret = self.post(self.get_path_by_site('/vsps?limit=0&offset=1'),
                        data=jsonutils.dumps([
                            {
                                'tagKey': constant.VSP_TAG_KEY,
                                'tagValue': vif['id'],
                            }
                        ]))
        if not ret or not ret.get('vsps'):
            raise fc_exc.VSPNotFound(vsp_id=vif['id'])
        return ret['vsps'][0]

    def del_port_group(self, dvs_id, pg_id):
        """send message to fusion compute to create a port group

        Deletes the given port group (docstring title kept from original;
        this is a delete, not a create).

        :param dvs_id: dvswitch id owning the port group
        :param pg_id: port group id
        :return:
        """
        url = self.get_path_by_site(constant.PORT_GROUP_ID_URI,
                                    dvs_id=dvs_id,
                                    pg_id=pg_id)
        self.delete(url)

    def _get_pg_id_pg_date(self, pg_data):
        # Extract the port group id (7th colon-separated urn field) from a
        # port group record; None when the urn is absent or malformed.
        urn = pg_data.get('urn')
        if urn is None:
            return None

        pg_data_list = re.split(':', urn)
        if len(pg_data_list) < 7:
            return None

        pg_id = pg_data_list[6]
        return pg_id

    def query_all_pg(self):
        # Page through all FC port groups, 100 at a time.
        query_path = self.get_path_by_site('/portgroups')
        offset = 0
        pg_list = []
        while True:
            ret = self.post(query_path,
                            data=jsonutils.dumps({'limit': 100,
                                                  'offset': offset}))
            temp_list = ret.get('portGroups')
            if isinstance(temp_list, list):
                pg_list.extend(temp_list)
            else:
                # Unexpected/absent payload: stop paging.
                break

            if len(temp_list) < 100:
                break
            else:
                offset = len(pg_list)
                # Brief pause between pages to avoid hammering FC.
                time.sleep(0.005)
        return pg_list

    def audit_pg(self):
        # Garbage-collect FC port groups that follow this driver's naming
        # scheme ('name#network-id#dvs-id[#suffix]'), were created by the
        # configured FC user, and no longer match any neutron network.
        context = nova_ctxt.get_admin_context()
        networks = self._neutron.get_all(context=context)
        self._init_all_fc_dvs()

        pg_list = self.query_all_pg()
        for pg in pg_list:
            pg_name_ayn_list = []
            try:
                pg_name_ayn_list = re.split('#', pg['name'])
            except Exception:
                # Names not produced by this driver are simply skipped.
                pass
            if len(pg_name_ayn_list) < 3:
                continue

            fc_network_name = pg_name_ayn_list[0]
            fc_network_id = pg_name_ayn_list[1]
            fc_dvs_id = pg_name_ayn_list[2]
            pg_id = self._get_pg_id_pg_date(pg)

            if fc_network_name is None \
                    or fc_network_id is None\
                    or fc_dvs_id is None\
                    or pg_id is None:
                continue

            # Only touch port groups on dvswitches we manage ...
            if fc_dvs_id not in self.dvs_mapping.values():
                continue
            # ... that were created by our configured FC user.
            pg_user = pg.get('userName')
            if pg_user is None:
                continue
            if pg_user != constant.CONF.fusioncompute.fc_user:
                continue

            is_need_remove = True
            for network in networks:
                if network['name'] == fc_network_name \
                        and network['id'] == fc_network_id:
                    is_need_remove = False
                    break

            if is_need_remove:
                try:
                    self.del_port_group(fc_dvs_id, pg_id)
                    LOG.warn(
                        'port group remove dvs_id=%s,ps_id=%s',
                        fc_dvs_id,
                        pg_id)
                except Exception:
                    # Best-effort cleanup: log and continue with the rest.
                    LOG.error('Error happen while delete port group remove '
                              'dvs_id=%s,ps_id=%s', fc_dvs_id, pg_id)
                    pass
|
|
@ -0,0 +1,100 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class OpsBase(object):
    """Base class for FusionCompute operation helpers.

    Holds an FC client plus the site context derived from it, and
    exposes thin HTTP-verb helpers that delegate to the client.
    """

    def __init__(self, fc_client):
        """
        :param fc_client: FC REST client; may be None
        """
        # set_client does the real work so the client can also be
        # swapped in after construction.
        self.fc_client = None
        self.site = None
        self.set_client(fc_client)

    def set_client(self, fc_client):
        """Attach a client object and cache its site context.

        :param fc_client: client used for REST calls; may be None
        :return:
        """
        self.fc_client = fc_client
        # A falsy client leaves the site context unset.
        self.site = fc_client.context if fc_client else None

    @property
    def site_id(self):
        """Identifier of the current site.

        :return: the ``site_id`` entry of the cached site context
        """
        return self.site['site_id']

    def get_path_by_site(self, path=None, **kwargs):
        """Build a site-scoped REST path.

        :param path: path suffix appended under the site
        :param kwargs: extra arguments forwarded to the site helper
        :return: site-qualified path
        """
        return self.site.get_path_by_site(path, **kwargs)

    def post(self, path, data=None, **kwargs):
        """Issue a POST through the attached client.

        :param path: path under Context, something like '/app/resource'
        :param data: (Optional) data of request
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        return self.fc_client.post(path, data=data, **kwargs)

    def get(self, path, **kwargs):
        """Issue a GET through the attached client.

        :param path: path under Context, something like '/app/resource/id'
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        return self.fc_client.get(path, **kwargs)

    def put(self, path, data=None, **kwargs):
        """Issue a PUT through the attached client.

        :param path: path under Context, something like '/app/resource/id'
        :param data: (Optional) data of request
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        return self.fc_client.put(path, data=data, **kwargs)

    def delete(self, path, **kwargs):
        """Issue a DELETE through the attached client.

        :param path: path under Context, something like '/app/resource/id'
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        return self.fc_client.delete(path, **kwargs)
|
|
@ -0,0 +1,78 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova.virt.fusioncomputeapi import ops_base
|
||||
from nova.virt.fusioncomputeapi import taskops
|
||||
|
||||
|
||||
class OpsTaskBase(ops_base.OpsBase):
    """Operation helper whose mutating verbs block on the spawned FC task."""

    def __init__(self, fc_client, task_ops):
        """
        :param fc_client: FC REST client
        :param task_ops: task monitor consumed by taskops.wait_task_done
        """
        super(OpsTaskBase, self).__init__(fc_client)
        self.task_ops = task_ops

    def post(self, path, data=None, excp=None, fixedInterval=0, **kwargs):
        """POST and wait for the resulting FC task to finish.

        :param path: path under Context, something like '/app/resource'
        :param data: (Optional) data of request
        :param excp: exception type raised if the task fails
        :param fixedInterval: fixed polling period; 0 means randomized
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        @taskops.wait_task_done(self.task_ops, excp, fixedInterval)
        def _send():
            # pylint: disable=W0142
            return super(OpsTaskBase, self).post(path, data, **kwargs)

        return _send()

    def put(self, path, data=None, excp=None, fixedInterval=0, **kwargs):
        """PUT and wait for the resulting FC task to finish.

        :param path: path under Context, something like '/app/resource/id'
        :param data: (Optional) data of request
        :param excp: exception type raised if the task fails
        :param fixedInterval: fixed polling period; 0 means randomized
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        @taskops.wait_task_done(self.task_ops, excp, fixedInterval)
        def _send():
            # pylint: disable=W0142
            return super(OpsTaskBase, self).put(path, data, **kwargs)

        return _send()

    def delete(self, path, excp=None, **kwargs):
        """DELETE and wait for the resulting FC task to finish.

        :param path: path under Context, something like '/app/resource/id'
        :param excp: exception type raised if the task fails
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        @taskops.wait_task_done(self.task_ops, excp)
        def _send():
            # pylint: disable=W0142
            return super(OpsTaskBase, self).delete(path, **kwargs)

        return _send()
|
|
@ -0,0 +1,87 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ConfigParser
|
||||
|
||||
SPLIT_TAG = '_'
|
||||
|
||||
|
||||
class OSConfigBase(dict):
    """Dict-backed, case-insensitive view of an OS-version INI mapping.

    Subclasses implement _config_to_dict() to translate parsed INI
    sections into dictionary entries; calling the instance with a file
    path (re)loads the mapping.
    """

    def __init__(self):
        super(OSConfigBase, self).__init__()
        # NOTE(review): 'ConfigParser' is the Python 2 module name; on
        # Python 3 this requires the 'configparser' module instead.
        self._config = ConfigParser.ConfigParser()
        self._sections = []

    def __getitem__(self, item):
        """Case-insensitive item lookup.

        :param item: key, matched against lower-cased stored keys
        :raises KeyError: when no entry matches
        """
        if item.lower() in self:
            return super(OSConfigBase, self).__getitem__(item.lower())
        # BUG FIX: include the offending key (original raised a bare
        # KeyError, which made lookup failures undiagnosable).
        raise KeyError(item)

    def get(self, k, d=None):
        """Case-insensitive dict.get.

        :param k: key, matched against lower-cased stored keys
        :param d: default returned when the key is absent
        """
        if k.lower() in self:
            return super(OSConfigBase, self).get(k.lower(), d)
        return d

    def _config_to_dict(self):
        """Change config to dict, must implement in sub class"""
        raise NotImplementedError()

    def __call__(self, config_file=None):
        """Load config_file (if given) and refresh the mapping."""
        if config_file:
            self._config.read(config_file)
            self._sections = self._config.sections()
            self._config_to_dict()
|
||||
|
||||
|
||||
class OSConfigInt(OSConfigBase):
    """Map readable OS version names to FC integer codes.

    windows 7 enterprise 32bit --> 25
    """

    def __init__(self):
        super(OSConfigInt, self).__init__()

    def _config_to_dict(self):
        # Option names are stored with '_' in the INI file; expose them
        # with spaces so callers can look up human-readable OS names.
        for section_name in self._sections:
            self[section_name] = {}
            for option, code in self._config.items(section_name):
                readable = str(option).replace(SPLIT_TAG, ' ')
                self[section_name][readable] = code
|
||||
|
||||
|
||||
class OSConfigStr(OSConfigBase):
    """Map FC integer OS codes back to readable version names.

    25 --> windows 7 enterprise 32bit
    """

    def __init__(self):
        super(OSConfigStr, self).__init__()

    def _config_to_dict(self):
        # Inverted mapping: the INI value (the int code) becomes the key
        # and the option name, with underscores restored to spaces,
        # becomes the value.
        for section_name in self._sections:
            self[section_name] = {}
            for option, code in self._config.items(section_name):
                self[section_name][code] = str(option).replace(SPLIT_TAG, ' ')
|
||||
|
||||
# Shared singletons; each is populated at configuration-load time by
# calling it with an INI file path (see OSConfigBase.__call__).
OS_VERSION_INT = OSConfigInt()
OS_VERSION_STR = OSConfigStr()
VIRTUAL_IO_OS_VERSION_INT = OSConfigInt()
|
|
@ -0,0 +1,139 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import requests
|
||||
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.virt.fusioncomputeapi import utils
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
|
||||
class RestClient(object):
    """Thin wrapper around ``requests`` for talking to the FC REST API."""

    def __init__(self, host, port=80, ssl=None, cert=None):
        """
        :param host: FC API host name or IP
        :param port: TCP port, default 80
        :param ssl: truthy -> use https
        :param cert: client certificate (kept, not passed to requests here)
        """
        self.host = host
        self.port = port
        self.__ssl = ssl
        self.__cert = cert

        self.__protocol = 'http' if not self.__ssl else 'https'

    def __repr__(self):
        """Human-readable client description.

        :return:
        """
        return 'REST client %s://%s:%s' % (
            self.__protocol, self.host, self.port)

    def _to_url(self, path):
        """Build the absolute URL for ``path``.

        :param path: path portion beginning with '/'
        :return: full URL string
        """
        return '%s://%s:%s%s' % (
            self.__protocol, self.host, self.port, path)

    def _request(self, method, path, data=None, headers=None, **kwargs):
        """Serialize ``data`` and send the HTTP request.

        :param method: HTTP verb, lower case
        :param path: path under the API root
        :param data: dict/list (serialized to JSON), pre-encoded string,
            or None (sent as '{}')
        :param headers: optional HTTP headers
        :param kwargs: passed through to requests.request
        :return: Response object in requests
        """

        url = self._to_url(path)

        if not data:
            data = jsonutils.dumps({})
        elif isinstance(data, dict) or isinstance(data, list):
            data = jsonutils.dumps(data)

        # GETs are routine; log them at debug, everything else at info.
        if method == 'get':
            log_fun = LOG.debug
        else:
            log_fun = LOG.info

        try:
            data_for_log = copy.deepcopy(jsonutils.loads(data))
            utils.drop_password_key(data_for_log)
            # BUG FIX: the sanitized payload was built but never logged;
            # emit it so requests are traceable without leaking secrets.
            log_fun(_('request: %s %s, body: %s'),
                    method, url, data_for_log)
        except Exception:
            # Payload is not JSON-decodable; log the bare method only.
            log_fun(_('request: %s'), method)

        # NOTE(review): verify=False disables TLS certificate checking;
        # acceptable only on a trusted management network — confirm.
        rsp = requests.request(method, url, data=data, headers=headers,
                               verify=False, **kwargs)
        return rsp

    def request_msg(self, method, path, data=None, headers=None, **kwargs):
        """Overridable request entry point for subclasses.

        :param method: HTTP verb, lower case
        :param path: path under the API root
        :param data: request payload
        :param headers: optional HTTP headers
        :param kwargs: passed through to _request
        :return: Response object in requests
        """
        return self._request(method, path, data=data, headers=headers,
                             **kwargs)

    def post(self, path, data=None, **kwargs):
        """Post.

        :param path: path under Context, something like '/app/resource'
        :param data: (Optional) data of request
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        return self.request_msg('post', path, data=data, **kwargs)

    def get(self, path, **kwargs):
        """Get.

        :param path: path under Context, something like '/app/resource/id'
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        return self.request_msg('get', path, **kwargs)

    def put(self, path, data=None, **kwargs):
        """Put.

        :param path: path under Context, something like '/app/resource/id'
        :param data: (Optional) data of request
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        return self.request_msg('put', path, data=data, **kwargs)

    def delete(self, path, **kwargs):
        """Delete.

        :param path: path under Context, something like '/app/resource/id'
        :param kwargs: headers, etc.
        :return: Response object in requests
        """
        return self.request_msg('delete', path, **kwargs)
|
|
@ -0,0 +1,148 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import functools
|
||||
import random
|
||||
import time
|
||||
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.virt.fusioncomputeapi import exception as fc_exc
|
||||
from nova.virt.fusioncomputeapi import ops_base
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
from oslo_service import loopingcall
|
||||
|
||||
|
||||
def wait_task_done(task_ops, exc=None, fixedInterval=0):
    """Decorator factory: run the call, then block on the FC task it spawns.

    Only meaningful for functions returning a dict shaped like
    {"taskUrn": string, "taskUri": string}; any other return value is
    passed through untouched (return {} to opt out of waiting).

    :param task_ops: the task monitor object used to poll the task URI
    :param exc: exception type raised when the request or task fails
    :param fixedInterval: 0 -> randomized poll period
        (interval + random()*3); non-zero -> poll at exactly this period
    :return: decorator
    """
    def wrap(func):
        """Decorate ``func`` so its spawned FC task is awaited.

        :param func: the function to be decorated
        :return: wrapped callable
        """
        @functools.wraps(func)
        def inner(*args, **kwargs):
            """Invoke the target, then poll its task to completion.

            :param args: positional args of the decorated function
            :param kwargs: keyword args of the decorated function
            :return: the decorated function's result
            """
            try:
                result = func(*args, **kwargs)
            except fc_exc.RequestError as req_exc:
                # Translate request failures when a caller-supplied
                # exception type was given.
                if exc:
                    raise exc(str(req_exc.kwargs['reason']))
                raise req_exc

            task_uri = result.get('taskUri') if isinstance(result, dict) \
                else None
            if task_uri:
                if fixedInterval != 0:
                    success, reason = task_ops.wait_task_done(
                        task_uri, 3, fixedInterval)
                else:
                    success, reason = task_ops.wait_task_done(task_uri)
                if not success:
                    LOG.error(_('task failed: %s'), reason)
                    if exc:
                        raise exc(str(reason))
                    raise fc_exc.FusionComputeTaskException(reason=reason)

            return result
        return inner
    return wrap
|
||||
|
||||
|
||||
class TaskOperation(ops_base.OpsBase):
    """Polls FusionCompute task URIs until they complete."""

    def __init__(self, fc_client):
        """TaskOperation init func

        :param fc_client:
        :return:
        """
        super(TaskOperation, self).__init__(fc_client)

    def wait_task_done(self, task_uri, interval=3, fixedInterval=0):
        """Block until the task at ``task_uri`` succeeds or fails.

        :param task_uri: URI of the FC task to poll
        :param interval: base polling period in seconds; when
            fixedInterval == 0 a random 0-3s jitter is added to it
        :param fixedInterval: non-zero -> poll at exactly this period
        :return: (success, reason) tuple; reason is the FC failure
            description, or None on success
        """
        if fixedInterval == 0:
            # Jitter the polling period so many concurrent waiters do
            # not hammer FC in lock-step.
            random.seed()
            f = random.random()
            f = f * 3
            interval = interval + f
        else:
            interval = fixedInterval

        ret = {'success': False, 'reason': None}

        def _wait_done():
            """Single poll step; retries transient GET failures."""
            num = 3
            for tmp in range(num):
                try:
                    task = self.get_task(task_uri)
                    break
                except Exception as e:
                    # BUG FIX: message previously read 'falied'.
                    LOG.info(_('Get task uri failed %d') % tmp)
                    if tmp >= (num - 1):
                        raise e
                    time.sleep(10)
                    continue

            if task['status'] == "success":
                ret['success'] = True
                raise loopingcall.LoopingCallDone()
            elif task['status'] == "failed":
                ret['reason'] = task['reasonDes']
                raise loopingcall.LoopingCallDone()
            else:
                LOG.info(_("Task [%s] is running,"), task_uri)

        timer = loopingcall.FixedIntervalLoopingCall(_wait_done)
        timer.start(interval=interval).wait()
        return ret['success'], ret['reason']

    def get_task(self, task_uri):
        """GET the task resource at ``task_uri``.

        :param task_uri:
        :return: decoded task resource
        """
        return self.get(task_uri)
|
|
@ -0,0 +1,28 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova.console import type
|
||||
|
||||
|
||||
class HuaweiConsoleVNC(type.ConsoleVNC):
    """ConsoleVNC variant that also carries the VNC password."""

    # BUG FIX: this method was named '__innit__' (typo), so it was never
    # invoked as the constructor and 'password' was never set.
    def __init__(self, host, port, password, internal_access_path=None):
        """
        :param host: VNC host
        :param port: VNC port
        :param password: VNC access password stored on the console
        :param internal_access_path: forwarded to ConsoleVNC
        """
        super(
            HuaweiConsoleVNC,
            self).__init__(
            host,
            port,
            internal_access_path)
        self.password = password
|
|
@ -0,0 +1,371 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import functools
|
||||
import hashlib
|
||||
import sys
|
||||
from threading import Thread
|
||||
import traceback
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.virt.fusioncomputeapi import constant
|
||||
from nova.virt.fusioncomputeapi import exception
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
|
||||
# Global oslo.config entry point and module-level logger.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def log_exception(exception=None):
    """Log the traceback of the exception currently being handled.

    Must be called from inside an ``except`` block, since it reads
    sys.exc_info().

    :param exception: unused; kept for signature compatibility
    :return:
    """

    if exception:
        pass

    etype, value, track_tb = sys.exc_info()
    for line in traceback.format_exception(etype, value, track_tb):
        LOG.error(line)
|
||||
|
||||
|
||||
def func_log_circle(instance=None, exceptions=None):
    """Decorator factory logging begin/success/failure around a call.

    :param instance: unused; kept for signature compatibility
    :param exceptions: exception instance raised instead of the original
        error when the wrapped call fails
    :return: decorator
    """

    def wrap(func):
        """Decorate ``func`` with begin/end logging.

        :param func: the function to be decorated
        :return: wrapped callable
        """

        def _step_msg(step):
            """Message for a lifecycle step.

            NOTE(review): always yields None, so the begin/success log
            lines carry no message text — confirm whether intentional.

            :param step: lifecycle step label
            :return:
            """
            return None

        @functools.wraps(func)
        def inner(*args, **kwargs):
            """Run the target, logging outcome and translating errors.

            :param args: positional args of the decorated function
            :param kwargs: keyword args of the decorated function
            :return: the decorated function's result
            """

            LOG.info(_step_msg('begin'))
            try:
                outcome = func(*args, **kwargs)
            except Exception as err:
                LOG.error('%s traceback begin.', _step_msg('failed'))
                log_exception(err)
                LOG.error('%s traceback end.', _step_msg('failed'))
                if exceptions is not None:
                    raise exceptions
                raise err
            LOG.info(_step_msg('success'))
            return outcome

        return inner

    return wrap
|
||||
|
||||
|
||||
def get_id_from_urn(urn, regex=constant.ID_IN_URN_REGEX):
    """Extract the id component from an FC URN.

    :param urn: URN string, e.g. 'urn:sites:4D6B0918:vms:123'
    :param regex: compiled pattern containing an 'id' named group
    :return: the matched id string
    :raises ValueError: when the URN does not match the pattern
    """
    match = regex.search(urn)
    if not match:
        # BUG FIX: previously *returned* ValueError(message=...) instead
        # of raising it — and ValueError accepts no 'message' keyword,
        # which itself raised TypeError. Raise a proper ValueError.
        raise ValueError('get id from URN failed')

    return match.group('id')
|
||||
|
||||
|
||||
def build_uri_with_params(uri, param_map):
    """Append ``param_map`` as a query string to ``uri``.

    :param uri: base URI without a query string
    :param param_map: mapping of query parameter names to values;
        values are interpolated as-is (no URL escaping is applied)
    :return: uri + '?' + 'k=v' pairs joined with '&'
    """
    # BUG FIX: items() instead of the Python-2-only iteritems() so this
    # helper also runs on Python 3 (same behavior on Python 2).
    query = '&'.join(['%s=%s' % (k, v) for (k, v) in param_map.items()])
    return ''.join([uri, '?', query])
|
||||
|
||||
|
||||
def generate_uri_from_urn(urn):
    """Translate an FC URN into its REST URI form.

    urn: urn:sites:4D6B0918:clusters:640
    uri: /service/sites/4D6B0918/clusters/640

    :param urn: URN string, or a falsy value
    :return: URI string, or None when urn is falsy
    """
    if not urn:
        return None
    return urn.replace('urn', '/service').replace(':', '/')
|
||||
|
||||
|
||||
def generate_urn_from_uri(uri):
    """Translate a REST URI back into FC URN form.

    uri: /service/sites/4D6B0918/clusters/640
    urn: urn:sites:4D6B0918:clusters:640

    :param uri: URI string, or a falsy value
    :return: URN string, or None when uri is falsy
    """
    if not uri:
        return None
    return uri.replace('/service', 'urn').replace('/', ':')
|
||||
|
||||
|
||||
def image_size_to_gb(image_size):
    """Convert an image size in bytes to whole gigabytes.

    FC disk sizes are expressed in GB while image sizes arrive in
    bytes; images smaller than 1 GB are rounded up to 1 GB.

    :param image_size: image size in bytes
    :return: size in GB (minimum 1), or None for non-int input
    """
    if not isinstance(image_size, int):
        return None
    # BUG FIX: use floor division so the result stays integral on
    # Python 3 ('/' would produce a float there; '/' already floored
    # on Python 2).
    gb_size = image_size // (1024 * 1024 * 1024)
    if gb_size == 0:
        return 1
    else:
        return gb_size
|
||||
|
||||
|
||||
def image_size_to_byte(image_size):
    """Convert a GB size to bytes.

    :param image_size: size in GB
    :return: size in bytes, or None for non-int input
    """
    if isinstance(image_size, int):
        return image_size * 1024 * 1024 * 1024
    return None
|
||||
|
||||
|
||||
def get_fc_uri(fc_uri, base_uri_map):
    """Resolve an FC URI template, filling in dependent URIs first.

    :param fc_uri: key into constant.FC_SITE_URI_MAP
    :param base_uri_map: substitution values; NOTE: mutated in place
        with any dependent URIs resolved along the way
    :return: the formatted URI string
    """
    entry = constant.FC_SITE_URI_MAP[fc_uri]
    # Recursively resolve any URIs this template depends on and make
    # them available as substitution values.
    for dep_key in entry.get('dependuri') or ():
        base_uri_map[dep_key] = get_fc_uri(dep_key, base_uri_map)
    return entry['baseuri'] % base_uri_map
|
||||
|
||||
|
||||
def get_boot_option_from_metadata(metadata):
    """Map the '__bootDev' metadata entry to an FC boot option.

    :param metadata: instance metadata dict; may be empty or None
    :return: value from constant.BOOT_OPTION_MAP
    """
    if not metadata:
        return constant.BOOT_OPTION_MAP['default']

    boot_option = metadata.get('__bootDev', 'default')
    if boot_option in constant.BOOT_OPTION_MAP:
        return constant.BOOT_OPTION_MAP[boot_option]

    # Unknown boot device requested — fall back to the default.
    LOG.warn(_('Invalid __bootDev: %s, use default instead'), boot_option)
    return constant.BOOT_OPTION_MAP['default']
|
||||
|
||||
|
||||
def get_vnc_key_map_setting_from_metadata(metadata):
    """get_vnc_key_map_setting_from_metadata

    Currently always returns the default keymap; the metadata/conf
    driven selection below was deliberately disabled and is kept for
    reference.

    :param metadata: instance metadata dict (currently unused)
    :return: constant.VNC_KEY_MAP_SETTING['default']
    """
    # if metadata:
    #     keymapsetting = metadata.get('__vnc_keymap', 'default')
    #     if keymapsetting in constant.VNC_KEY_MAP_SETTING:
    #         LOG.info(_('The keymapsetting is %s'), keymapsetting)
    #         return constant.VNC_KEY_MAP_SETTING[keymapsetting]

    # LOG.warn(_('Invalid __vnc_keymap info , use conf instead'))
    # keymapsetting = CONF.vnc_keymap
    # if keymapsetting not in constant.VNC_KEY_MAP_SETTING:
    return constant.VNC_KEY_MAP_SETTING['default']
    # return constant.VNC_KEY_MAP_SETTING[keymapsetting]
|
||||
|
||||
|
||||
def fc_qos_convert(input_dict, refer_key,
                   out_key, vcpus=1):
    """Translate nova CPU-QoS keys into their FC equivalents.

    :param input_dict: nova extra-specs style mapping; may be None
    :param refer_key: ordered nova-side key names
    :param out_key: ordered FC-side key names, parallel to refer_key
    :param vcpus: vCPU count used to scale the default share/weight
    :return: dict mapping FC keys to values
    """
    source = {} if input_dict is None else input_dict
    rsp_dict = {}
    df_values = constant.CPU_QOS_FC_DEFAULT_VALUE

    for src, dst, df_value in zip(refer_key, out_key, df_values):
        value = source.get(src)
        if value is not None:
            rsp_dict[dst] = value
        elif src in ('weight', 'quota:cpu_shares'):
            # Default shares scale with the number of vCPUs.
            rsp_dict[dst] = df_value * vcpus
        else:
            rsp_dict[dst] = df_value
    return rsp_dict
|
||||
|
||||
|
||||
def dict_add(dict1=None, dict2=None):
    """Merge two optional dicts into a new dict; dict2 wins on clashes.

    :param dict1: first mapping, or None
    :param dict2: second mapping, or None
    :return: new dict containing the items of both
    """
    merged = {}
    for source in (dict1, dict2):
        if source:
            merged.update(source.items())
    return merged
|
||||
|
||||
|
||||
def split_strip(source_str, sep_str=','):
    """Split ``source_str`` on ``sep_str`` and strip each piece.

    :param source_str: string to split
    :param sep_str: separator, comma by default
    :return: list of stripped parts; [] for blank/empty input
    """
    if not source_str.strip():
        return []
    return [part.strip() for part in source_str.split(sep_str)]
|
||||
|
||||
# JSON keys whose values must never reach the logs; consumed by
# drop_password_key() below.
ENCRYPT_LIST = ['password', 'vncpassword', 'oldpassword', 'domainpassword',
                'vncoldpassword', 'vncnewpassword', 'accessKey', 'secretKey',
                'isUpdateVmPassword', 'token']


def drop_password_key(data):
    """Recursively remove sensitive keys (ENCRYPT_LIST) from a dict.

    Mutates ``data`` in place; non-dict input is silently ignored.

    :param data: arbitrary JSON-like structure
    :return:
    """
    if not isinstance(data, dict):
        return

    # BUG FIX: iterate over a snapshot of the keys — deleting entries
    # while iterating a live dict view raises RuntimeError on Python 3.
    for key in list(data.keys()):
        if key in ENCRYPT_LIST:
            del data[key]
        elif data[key] and isinstance(data[key], dict):
            drop_password_key(data[key])
|
||||
|
||||
|
||||
def sha256_based_key(key):
    """Return the SHA-256 hex digest of ``key``.

    :param key: bytes, or text (encoded as UTF-8 before hashing)
    :return: 64-character hex digest string
    """
    hash_ = hashlib.sha256()
    # Generalization: hashlib requires bytes on Python 3, so encode
    # text input transparently. The 'str is not bytes' guard leaves
    # Python 2 byte-string behavior exactly as before.
    if isinstance(key, str) and str is not bytes:
        key = key.encode('utf-8')
    hash_.update(key)
    return hash_.hexdigest()
|
||||
|
||||
|
||||
class TimeoutException(Exception):
    """Raised internally when a time-limited FC call exceeds its deadline."""
    pass
|
||||
|
||||
# Private thread-stop hook used by timelimited() to abort workers.
# NOTE(review): Thread._Thread__stop is a Python-2-only private API
# (removed in Python 3) — confirm this module only targets Python 2.
ThreadStop = Thread._Thread__stop
|
||||
|
||||
|
||||
def timelimited(timeout):
    """Decorator factory: run the wrapped FC request in a worker thread
    and abort it when it exceeds ``timeout`` seconds.

    :param timeout: max seconds to wait for the wrapped call
    :return: decorator
    """
    def decorator(function):

        def decorator2(*args, **kwargs):
            class TimeLimited(Thread):

                def __init__(self, _error=None):
                    Thread.__init__(self)
                    # Holds any exception raised by the wrapped call.
                    self._error = _error

                def run(self):
                    try:
                        self.result = function(*args, **kwargs)
                    except Exception as e:
                        LOG.debug(_("TimeLimited run Exception: %s") % e)
                        self._error = e

                def _stop(self):
                    # NOTE(review): forcibly stops the thread via the
                    # Python-2-only Thread._Thread__stop hook.
                    if self.isAlive():
                        ThreadStop(self)

            t = TimeLimited()
            t.start()
            t.join(timeout)

            # Worker reported a timeout itself: stop it and surface 503.
            if isinstance(t._error, TimeoutException):
                LOG.debug(_("t._error %s"), t._error)
                t._stop()
                raise exception.RequestError(reason='request fc timeout',
                                             error_code='503')
            # join() elapsed but the worker is still running: hard timeout.
            if t.isAlive():
                LOG.info(_("t.isAlive"))
                t._stop()
                raise exception.TimeoutError(reason='request timeout',
                                             error_code='503')
            if t._error is None:
                LOG.debug(_("t._error is None"))
                return t.result
            else:
                # Re-raise whatever the wrapped call raised.
                LOG.error(_("t._error %s"), t._error)
                raise t._error

        return decorator2
    return decorator
|
|
@ -0,0 +1,928 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
|
||||
from nova.i18n import _
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from nova.virt.fusioncomputeapi import constant
|
||||
from nova.virt.fusioncomputeapi import exception as fc_exc
|
||||
from nova.virt.fusioncomputeapi.fcinstance import FC_INSTANCE_MANAGER as FC_MGR
|
||||
from nova.virt.fusioncomputeapi import ops_task_base
|
||||
from nova.virt.fusioncomputeapi import utils
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
|
||||
|
||||
class VmCreateBase(ops_task_base.OpsTaskBase):
|
||||
"""vm controller class"""
|
||||
|
||||
def __init__(self, fc_client, task_ops, instance):
    """Cache the nova instance and pre-create empty creation state.

    :param fc_client: FC REST client
    :param task_ops: task monitor used for asynchronous FC tasks
    :param instance: nova instance mapping being created
    """
    super(VmCreateBase, self).__init__(fc_client, task_ops)
    self._instance = instance
    self._key_data = self._instance.get('key_data')
    self._metadata = self._instance.get('metadata')

    # The remaining fields are placeholders, populated later by
    # __call__ with the per-creation parameters.
    self._vm_create_body = {}
    self._volume_ops = None
    self._location = None
    self._vifs = []
    self._block_device_info = {}
    self._root_device_name = None
    self._image_meta = {}
    self._injected_files = []
    self._admin_password = None
    self._extra_specs = {}
    self._context = {}
    self._customization = {}
    self._is_support_virtual_io = False
|
||||
|
||||
def __call__(
        self,
        context,
        volume_ops,
        location,
        vifs,
        block_device_info,
        image_meta,
        injected_files,
        admin_password,
        extra_specs,
        customization,
        resource_group_urn,
        compute_ops):
    """Bind the per-creation parameters onto this builder.

    Called once before create_and_boot_vm(); simply caches every
    argument on the instance for the later build/create steps.
    """
    self._volume_ops = volume_ops
    self._compute_ops = compute_ops
    self._location = location
    self._vifs = vifs
    self._block_device_info = block_device_info
    # Root disk device name is taken from the block device mapping.
    self._root_device_name = block_device_info.get('root_device_name')
    self._image_meta = image_meta
    self._injected_files = injected_files
    self._admin_password = admin_password
    self._extra_specs = extra_specs
    self._context = context
    self._customization = customization
    self._resource_group_urn = resource_group_urn
|
||||
|
||||
@property
def image_properties(self):
    """Properties of the image metadata, or {} when no image meta set.

    :return: image meta properties mapping
    """
    if not self._image_meta:
        return {}
    return self._image_meta.properties
|
||||
|
||||
def check_input(self):
    """Validate that usable OS options can be derived for this VM.

    :raises fc_exc.InvalidOsOption: when osType or osVersion is missing
    """
    os_option = self.get_os_options()
    LOG.debug(_('os option: %s .'), jsonutils.dumps(os_option))
    if os_option['osType'] and os_option['osVersion']:
        return
    LOG.error('Invalid os option for vm %s!', self._instance['name'])
    raise fc_exc.InvalidOsOption()
|
||||
|
||||
def get_body_ext(self):
    """Extension hook: subclasses add protocol/source-specific fields
    to ``self._vm_create_body`` after the base body is built.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError()
|
||||
|
||||
def build_para(self):
    """Assemble the base FC "create VM" request body.

    Populates ``self._vm_create_body`` from the data captured in
    __call__, then lets the subclass extend it via get_body_ext().
    """
    # With cloud-init the public key reaches the guest through the
    # metadata service, so it must not be passed to FC directly.
    if constant.CONF.fusioncompute.instance_initial_mode == 'cloud_init':
        self._key_data = None
    self._vm_create_body = {
        'name': self._instance['display_name'],
        'description': self._instance['name'],
        'group': constant.VM_GROUP_FLAG,
        'uuid': self._instance['uuid'],
        'externalUuid': self._instance['uuid'],
        'location': self._location,
        'autoBoot': self.is_auto_boot(),
        'osOptions': self.get_os_options(),
        'vmConfig': self.get_vm_config(),
        'vmCustomization': self.get_vm_customization(),
        'publickey': self._key_data
    }
    self.get_body_ext()
|
||||
|
||||
def extend_ops_before_start(self):
    """Hook run while the VM is still stopped (when is_auto_boot() is
    False), before the explicit start. No-op in the base class;
    subclasses attach local disks / user volumes here.

    :return: None
    """
    pass
|
||||
|
||||
def create_and_boot_vm(self):
    """Template method driving the whole creation flow:
    validate -> build body -> create on FC -> (optionally) finish
    stopped-state work and start the VM.

    :return: None
    """
    self.check_input()
    self.build_para()
    self.create_vm()

    # VM is created in stopped state in some cases,
    # do the extended ops in subclass and start it at last
    if not self.is_auto_boot():
        self.inject_files()

        # Other operations while the VM is stopped
        self.extend_ops_before_start()
        self._compute_ops.start_vm(self._instance, self._block_device_info)
|
||||
|
||||
def get_cpu_info(self):
    """Build the FC CPU spec: vcpu count plus QoS limits converted from
    flavor extra specs, and an optional coresPerSocket derived from
    hw:numa_nodes.

    :return: dict with at least 'quantity'; QoS keys merged in by
        utils.dict_add; 'coresPerSocket' only when numa_nodes is set.
    """
    cpu_info = {'quantity': self._instance['vcpus']}
    cpu_qos = utils.fc_qos_convert(self._extra_specs,
                                   constant.CPU_QOS_NOVA_KEY,
                                   constant.CPU_QOS_FC_KEY,
                                   cpu_info.get('quantity'))
    cpu_info = utils.dict_add(cpu_info, cpu_qos)

    numa_nodes = self._extra_specs.get('hw:numa_nodes', None)
    if numa_nodes is not None:
        LOG.debug(_('numa_nodes %s'), numa_nodes)
        # NOTE(review): Python 2 integer division — silently floors when
        # vcpus is not divisible by numa_nodes; confirm that is intended.
        _core_per_socket = int(self._instance['vcpus']) / int(numa_nodes)
        cpu_info['coresPerSocket'] = _core_per_socket
        LOG.debug(_('_core_per_socket %d'), _core_per_socket)

    return cpu_info
|
||||
|
||||
def get_memory_info(self):
    """Build the FC memory spec for the instance.

    :return: dict carrying ``quantityMB`` from the instance flavor.
    """
    memory_mb = self._instance['memory_mb']
    return {'quantityMB': memory_mb}
|
||||
|
||||
def get_disks_info(self):
    """Abstract: build the per-source disk spec list for the create
    body. Implemented by volume/image/clone subclasses.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError()
|
||||
|
||||
def get_nic_info(self):
    """Build one FC NIC spec per captured VIF.

    :return: list of dicts (name/portId/mac/portGroupUrn/sequenceNum/
        virtIo); virtIo is 1 only when the chosen guest OS supports
        virtio (see get_fc_os_options).
    """
    virt_io_flag = 1 if self._is_support_virtual_io else 0
    nics = []
    for vif in self._vifs:
        net_info = vif['network_info']
        nics.append({
            'name': net_info['id'],
            'portId': net_info['id'],
            'mac': net_info['address'],
            'portGroupUrn': vif['pg_urn'],
            'sequenceNum': vif['sequence_num'],
            'virtIo': virt_io_flag
        })
    return nics
|
||||
|
||||
def get_fc_os_options(self, os_type, os_version):
    """Map a (type, version) pair onto FC's osType/osVersion codes.

    Side effect: sets ``self._is_support_virtual_io`` to True when the
    resolved version code is in constant.VIRTUAL_IO_OS_LIST (read later
    by get_nic_info).

    :param os_type: OS type string or None (treated as '')
    :param os_version: OS version string or None (treated as '')
    :return: dict {'osType': ..., 'osVersion': ...}
    """
    if os_type is None:
        os_type = ''
    if os_version is None:
        os_version = ''

    fc_os_type = constant.HUAWEI_OS_TYPE_MAP.\
        get(os_type.lower(), constant.DEFAULT_HUAWEI_OS_TYPE)

    # 201=Other_Windows(32_bit),301=Other_Linux(32_bit),401=Other(32_bit)
    # using hard code for default os_version value.
    # if huawei-os-config.conf has been changed,
    # those code should be modified also.
    if fc_os_type == 'Windows':
        fc_os_version = \
            constant.HUAWEI_OS_VERSION_INT[fc_os_type].\
            get(os_version.lower(), 201)
    elif fc_os_type == 'Linux':
        fc_os_version = \
            constant.HUAWEI_OS_VERSION_INT[fc_os_type].\
            get(os_version.lower(), 301)
    else:
        fc_os_version = \
            constant.HUAWEI_OS_VERSION_INT[fc_os_type].\
            get(os_version.lower(), 401)

    if fc_os_version in constant.VIRTUAL_IO_OS_LIST:
        self._is_support_virtual_io = True

    return {
        'osType': fc_os_type,
        'osVersion': fc_os_version
    }
|
||||
|
||||
def get_os_options(self):
    """Resolve OS options for the create body.

    Base implementation falls back to the generic "other"/"other"
    mapping; subclasses override to read image/volume metadata first.

    :return: dict {'osType': ..., 'osVersion': ...}
    """
    os_type = "other"
    os_version = "other"
    return self.get_fc_os_options(os_type, os_version)
|
||||
|
||||
def get_properties(self):
    """Build the vmConfig 'properties' section: boot option, VNC keymap,
    optional HPET flag and secure-VM type from flavor extra specs.

    :return: dict of FC VM properties
    """
    vm_properties = {
        'bootOption': utils.get_boot_option_from_metadata(
            self._metadata),
        'vmVncKeymapSetting': utils.get_vnc_key_map_setting_from_metadata(
            self._metadata)}
    hpet_support = self._extra_specs.get('extra_spec:bios:hpet')
    if hpet_support is not None:
        LOG.debug(_('hpet_support %s'), hpet_support)
        if str(hpet_support).lower() == 'enabled':
            vm_properties['isHpet'] = True
    # NOTE(review): 'secuirty' looks misspelled, but it is the key
    # callers actually set in extra specs — confirm before renaming.
    secure_vm_type = self._extra_specs.get('secuirty:instance_type')
    if secure_vm_type and str(secure_vm_type).upper() == 'GVM':
        vm_properties['secureVmType'] = 'GVM'
    elif secure_vm_type and str(secure_vm_type).upper() == 'SVM':
        vm_properties['secureVmType'] = 'SVM'

    return vm_properties
|
||||
|
||||
def get_gpu_info(self):
    """Derive GPU passthrough config from flavor extra specs.

    Two ways to request GPUs: 'pci_passthrough:gpu_specs' as
    "alias:mode:count", or a bare 'pci_passthrough:gpu_number'.
    Only consulted when 'pci_passthrough:enable_gpu' is "true".

    :return: (enabled, gpu_info) — enabled is True when GPUs were
        requested; gpu_info is a list of {'gpuUrn': 'auto', ...} dicts
    :raises fc_exc.InvalidFlavorExtraSpecInfo: on malformed specs
    """
    gpu_info = []
    enable_gpu = self._extra_specs.get('pci_passthrough:enable_gpu')
    gpu_number = self._extra_specs.get('pci_passthrough:gpu_number')
    gpu_specs = self._extra_specs.get('pci_passthrough:gpu_specs')

    if enable_gpu and str(enable_gpu).upper() == 'TRUE':
        if gpu_specs:
            # expected format: {alias}:{mode}:{count}
            gpu_specs = gpu_specs.split(':')
            if gpu_specs is None or len(gpu_specs) != 3:
                reason = 'Invalid flavor extra spec info: ' \
                         'gpu_specs is %s' % gpu_specs
                LOG.error(reason)
                raise fc_exc.InvalidFlavorExtraSpecInfo(reason=reason)
            else:
                # gpu_alias = gpu_specs[0] # reserve property
                gpu_mode = gpu_specs[1]
                gpu_number = gpu_specs[2]
                for i in range(int(gpu_number)):
                    gpu_info.append({'gpuUrn': 'auto', 'mode': gpu_mode})
                return True, gpu_info
        elif gpu_number and int(gpu_number) > 0:
            for i in range(int(gpu_number)):
                gpu_info.append({'gpuUrn': 'auto'})
            return True, gpu_info
        else:
            # enable_gpu set but neither specs nor a positive count given
            reason = 'Invalid flavor extra spec info:gpu_number is %s,' \
                     ' gpu_specs is %s' % (gpu_number, gpu_specs)
            LOG.error(reason)
            raise fc_exc.InvalidFlavorExtraSpecInfo(reason=reason)
    return False, gpu_info
|
||||
|
||||
def get_vm_config(self):
    """Assemble the full vmConfig section of the create body from the
    cpu/memory/disk/nic/property builders, plus optional GPU and cdrom
    settings.

    :return: vmConfig dict
    """
    config = {
        'cpu': self.get_cpu_info(),
        'memory': self.get_memory_info(),
        'disks': self.get_disks_info(),
        'nics': self.get_nic_info(),
        'properties': self.get_properties()
    }

    (ret, gpu_info) = self.get_gpu_info()
    if ret:
        config['gpu'] = gpu_info
        # GPU passthrough requires fully reserved memory on FC
        config['memory']['reservation'] = config['memory']['quantityMB']

    # reserve cdrom mount device for vm.
    # The value None represent not reserve,
    # default is None for Uxy
    # default is xvdd for private cloud
    if constant.CONF.fusioncompute.reserve_disk_symbol is not None and str(
            constant.CONF.fusioncompute.reserve_disk_symbol).\
            upper() == 'FALSE':
        config['cdromSequenceNum'] = constant.CONF.fusioncompute. \
            cdrom_sequence_num

    return config
|
||||
|
||||
def _get_inject_ip_flag(self, port_id):
    """Read the per-port IP-injection flag from instance metadata.

    Metadata format: ``vnic_info:<port_uuid> = "enable_ip_inject:true|false"``

    :param port_id: neutron port uuid
    :return: True only when the metadata explicitly enables injection;
        False on absence or any parse error (best-effort by design)
    """
    inject_ip_flag = False
    vnic_info = self._metadata.get("vnic_info:%s" % port_id)
    try:
        # Python 2 only: metadata values arrive as unicode strings
        if isinstance(vnic_info, unicode):
            for t in vnic_info.strip().split(','):
                if t.startswith('enable_ip_inject'):
                    flag_str = t.strip().split(':')[1]
                    flag_str = flag_str.lower()
                    inject_ip_flag = (flag_str == 'true')
    except Exception as e:
        # malformed metadata must not break VM creation; log and
        # fall back to "do not inject"
        LOG.error("network param error: %s", vnic_info)
        LOG.error("exception: %s", e)
    return inject_ip_flag
|
||||
|
||||
def _get_vm_customization_nics(self):
    """Build the per-NIC customization list (IPv4 address, gateway,
    netmask, DNS) for VIFs whose port enables IP injection; other VIFs
    get a bare sequenceNum entry.

    :return: list of FC nicSpecification dicts
    """
    cus_nics = []
    for vif in self._vifs:
        if self._get_inject_ip_flag(vif['network_info']['id']) is False:
            # injection disabled: FC still needs the slot number
            cus_nic = {
                'sequenceNum': vif['sequence_num'] + 1
            }
            cus_nics.append(cus_nic)
            continue

        network = vif['network_info']['network']
        subnet_ipv4_list = [s for s in network['subnets']
                            if s['version'] == constant.IPV4_VERSION]
        if len(subnet_ipv4_list) > 0:
            ip_ipv4 = None

            # at most two DNS servers are passed through to FC
            dns = [None, None]
            if len(subnet_ipv4_list[0]['ips']) > 0:
                ip_ipv4 = subnet_ipv4_list[0]['ips'][0]

            dns_len = len(subnet_ipv4_list[0]['dns'])
            for index in range(0, min(2, dns_len)):
                dns[index] = subnet_ipv4_list[0]['dns'][index]['address']

            netmask_ipv4 = str(subnet_ipv4_list[0].as_netaddr().netmask)
            gateway_ipv4 = subnet_ipv4_list[0]['gateway']['address']

            # FC NIC sequence numbers are 1-based
            cus_nic = {'sequenceNum': vif['sequence_num'] + 1,
                       'ip': ip_ipv4 and ip_ipv4['address'] or '',
                       'gateway': gateway_ipv4,
                       'netmask': netmask_ipv4,
                       'ipVersion': constant.IPV4_VERSION,
                       'setdns': dns[0],
                       'adddns': dns[1]}
            cus_nics.append(cus_nic)

    LOG.debug(_('cus_nic: %s.'), jsonutils.dumps(cus_nics))
    return cus_nics
|
||||
|
||||
def _validate_customization(self, customization):
    """Reject any customization key FC does not understand.

    :param customization: dict of OS customization settings
    :raises fc_exc.InvalidCustomizationInfo: on an unsupported key
    """
    allowed_keys = (
        'hostname',
        'workgroup',
        'domain',
        'domainName',
        'domainPassword',
        'ouName',
    )

    for key in customization.keys():
        if key in allowed_keys:
            continue
        msg = _("Invalid key: %s") % key
        raise fc_exc.InvalidCustomizationInfo(reason=msg)
|
||||
|
||||
def get_vm_customization(self):
    """Build the vmCustomization section of the create body.

    Behavior by mode:
    - cloud_init: only osType, password update disabled (cloud-init
      handles credentials itself).
    - osType 'Other': FC cannot customize; returns the partial body if
      any flags were set, else None.
    - otherwise: full customization (password, NIC specs, validated
      user-supplied keys).

    :return: customization dict, or None when nothing applies
    """
    vm_custom_body = {}

    if constant.CONF.fusioncompute.instance_initial_mode == 'cloud_init':
        vm_custom_body['isUpdateVmPassword'] = False
        vm_custom_body['osType'] = self.get_os_options()['osType']
        return vm_custom_body

    # metadata flag '__inject_pwd' may arrive as bool or string
    inject_pwd_flag = self._metadata.get('__inject_pwd')
    if inject_pwd_flag is False or inject_pwd_flag == 'False':
        vm_custom_body['isUpdateVmPassword'] = False

    if self.get_os_options()['osType'] == 'Other':
        if len(vm_custom_body):
            return vm_custom_body
        return None

    vm_custom_body['osType'] = self.get_os_options()['osType']
    vm_custom_body['password'] = self._admin_password
    vm_custom_body['nicSpecification'] = self._get_vm_customization_nics()

    self._validate_customization(self._customization)
    for key in self._customization.keys():
        vm_custom_body[key] = self._customization[key]

    return vm_custom_body
|
||||
|
||||
def is_auto_boot(self):
    """Whether FC should start the VM immediately after creation.

    File injection requires a stopped VM, so any pending injected
    files force a manual start later.

    :return: True when there is nothing to inject, else False
    """
    return not self._injected_files
|
||||
|
||||
def inject_files(self):
    """Push each (path, contents) pair into the guest via FC's
    set_vm_data action. Skipped entirely when file injection is
    disabled in configuration.

    :return: None
    """
    if constant.CONF.fusioncompute.fusioncompute_file_inject == 'disabled':
        LOG.debug(_('inject files use fusioncompute is disabled.'))
        return
    fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
    for (path, contents) in self._injected_files:
        body = {
            'fileName': path,
            'vmData': contents
        }
        self.post(fc_vm.get_vm_action_uri('set_vm_data'), data=body)
        LOG.debug(_('inject file %s succeed.') % path)
|
||||
|
||||
def create_vm(self):
    """Abstract: issue the actual FC create/clone request using the
    body prepared by build_para(). Implemented by subclasses.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError()
|
||||
|
||||
|
||||
class VmCreateByImport(VmCreateBase):
    """Create a VM through FC's "import VM" interface.

    Subclasses pick the import source by returning a protocol string
    (glance/nfs/uds/null) from get_protocol().
    """

    def get_protocol(self):
        """Abstract: import protocol identifier for the FC request."""
        raise NotImplementedError()

    def create_vm(self):
        """POST the prepared body to the site's import-VM URI.

        :return: None
        :raises fc_exc.FusionComputeReturnException: on FC failure
        """
        self.post(self.site.import_vm_uri, data=self._vm_create_body,
                  excp=fc_exc.FusionComputeReturnException, fixedInterval=1)

    def is_auto_boot(self):
        """Do not auto-boot when files must be injected or when the
        instance carries local disks (they are attached while stopped).
        """
        if len(self._injected_files):
            return False
        if self._compute_ops.get_local_disk_property(self._instance):
            return False
        else:
            return True

    def get_body_ext(self):
        """Extend the create body with import-specific fields: protocol,
        resource group, enhanced-vNIC bandwidth limits and the
        multi-disk speedup flag from flavor extra specs.

        :return: None
        """
        self._vm_create_body['protocol'] = self.get_protocol()
        if self._resource_group_urn:
            self._vm_create_body['resourceGroup'] = self._resource_group_urn
        if self._extra_specs:
            instance_vnic_type = self._extra_specs.get('instance_vnic:type')
            if instance_vnic_type and instance_vnic_type.lower() == 'enhanced':
                instance_vnic_bandwidth = self._extra_specs.get(
                    'instance_vnic:instance_bandwidth')
                instance_vnic_max_count = self._extra_specs.get(
                    'instance_vnic:max_count')
                # both values must be present to enable the limits
                if instance_vnic_bandwidth is not None \
                        and instance_vnic_max_count is not None:
                    self._vm_create_body['bandwidth'] = int(
                        instance_vnic_bandwidth)
                    self._vm_create_body['maxVnic'] = int(
                        instance_vnic_max_count)

            is_multi_disk_speedup = self._extra_specs.get(
                'extra_spec:io:persistent_grant')
            if is_multi_disk_speedup \
                    and is_multi_disk_speedup.lower() == 'true':
                self._vm_create_body[
                    'isMultiDiskSpeedup'] = is_multi_disk_speedup

    def extend_ops_before_start(self):
        """Local disks can only be created/attached while the VM is
        stopped; do that here before the explicit start.

        :return: None
        """
        self._compute_ops.create_and_attach_local_disk_before_start(
            self._instance, self._block_device_info)
|
||||
|
||||
|
||||
class VmCreateWithVolume(VmCreateByImport):
    """Create (boot) a VM from existing cinder volumes."""

    def get_protocol(self):
        """No image transfer needed — protocol "null"."""
        return "null"

    def get_disks_info(self):
        """Build disk specs from the block device mapping.

        The disk mounted at the root device gets sequenceNum 1 (FC's
        sys-vol slot); user volumes get a computed sequence number.

        :return: list of FC disk dicts
        """

        LOG.debug(_('prepare volume'))

        disks_info = []
        for disk in self._volume_ops.ensure_volume(self._block_device_info):
            disk_info = {
                'volumeUrn': disk['urn'],
                'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
            }

            if disk['mount_device'] == self._root_device_name:
                disk_info['sequenceNum'] = 1
            else:
                disk_info['sequenceNum'] = self._compute_ops.get_sequence_num(
                    disk['urn'], disk['mount_device'])

            disks_info.append(disk_info)
        return disks_info

    def get_os_options(self):
        """Resolve OS options from the boot volume's image metadata,
        falling back to the generic default.

        NOTE(review): reads the private ``_task_state`` attribute of the
        instance object — confirm whether the public task_state property
        should be used instead.
        """
        if self._instance._task_state == 'rebuild_spawning':
            # os_type = self.image_properties.get(constant.HUAWEI_OS_TYPE)
            # os_version =
            # self.image_properties.get(constant.HUAWEI_OS_VERSION)
            # if os_type:
            # return self.get_fc_os_options(os_type, os_version)
            # else:
            return super(VmCreateWithVolume, self).get_os_options()

        # get os Type from mata
        meta_data = self._volume_ops.\
            get_block_device_meta_data(self._context, self._block_device_info)
        if meta_data:
            volume_meta_data = meta_data.get('volume_image_metadata')
            if volume_meta_data:
                os_type = volume_meta_data.get(constant.HUAWEI_OS_TYPE)
                os_version = volume_meta_data.get(constant.HUAWEI_OS_VERSION)
                if os_type:
                    return self.get_fc_os_options(os_type, os_version)

        return super(VmCreateWithVolume, self).get_os_options()
|
||||
|
||||
|
||||
class VmCreateWithImage(VmCreateByImport):
    """Create a VM from an image (glance by default)."""

    def get_protocol(self):
        """Default import protocol: glance."""
        return "glance"

    def get_os_options(self):
        """Resolve OS options for image boot.

        Currently falls through to the generic default; the
        image-property lookup is intentionally disabled below.
        """

        # get os Type from mata
        # os_type = self.image_properties.get(constant.HUAWEI_OS_TYPE)
        # os_version = self.image_properties.
        # get(constant.HUAWEI_OS_VERSION)
        # if os_type:
        # return self.get_fc_os_options(os_type, os_version)
        # else:
        return super(VmCreateWithImage, self).get_os_options()

    def _get_image_size(self):
        """Image size converted to whole GB, or 0 when size is unset.

        :return: int size in GB
        """
        image_size = self._image_meta.size
        if image_size:
            return utils.image_size_to_gb(image_size)
        else:
            return 0

    def check_input(self):
        """Base checks plus: the image must fit in the flavor's root
        disk.

        :raises fc_exc.ImageTooLarge: image size exceeds root_gb
        """
        super(VmCreateWithImage, self).check_input()

        disk_quantity_gb = self._instance['root_gb']
        image_size = self._get_image_size()
        if image_size > disk_quantity_gb:
            LOG.error(_("image is larger than sys-vol."))
            raise fc_exc.ImageTooLarge

    def get_disks_info(self):
        """Disk specs for image boot: a new sys-vol sized from the
        flavor (sequenceNum 1) plus any attached user volumes.

        :return: list of FC disk dicts
        """

        LOG.debug(_('prepare volume'))

        disks_info = []

        # sys vol info
        sys_disk_info = {
            'sequenceNum': 1,
            'quantityGB': self._instance['root_gb'],
            'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
        }
        disks_info.append(sys_disk_info)

        # user vol info
        for disk in self._volume_ops.ensure_volume(self._block_device_info):
            user_disk_info = {
                'volumeUrn': disk['urn'],
                'sequenceNum': self._compute_ops.get_sequence_num(
                    disk['urn'],
                    disk['mount_device']),
                'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']}
            disks_info.append(user_disk_info)

        return disks_info
|
||||
|
||||
|
||||
class VmCreateWithNfsImage(VmCreateWithImage):
    """Create a VM from an image stored on an NFS share."""

    def get_body_ext(self):
        """Extend the create body with the NFS template location.

        :return: None
        """
        super(VmCreateWithNfsImage, self).get_body_ext()
        self._vm_create_body['url'] = self._get_template_url()

    def _get_template_url(self):
        """NFS location of the image, taken from glance image
        properties.
        """
        return self.image_properties[constant.HUAWEI_IMAGE_LOCATION]

    def get_protocol(self):
        """Import protocol for this source: nfs."""
        return "nfs"
|
||||
|
||||
|
||||
class VmCreateWithUdsImage(VmCreateWithImage):
    """Create a VM from an image stored in UDS (S3-compatible) storage."""

    def __init__(self, fc_client, task_ops, instance):
        super(
            VmCreateWithUdsImage,
            self).__init__(
            fc_client,
            task_ops,
            instance)
        # parsed from the image's location property in check_input()
        self.usd_image_server_ip = None
        self.usd_image_port = None
        self.usd_image_bucket_name = None
        self.usd_image_key = None

    def _get_uds_image_info(self, image_location):
        """Split a UDS image location string into its four parts.

        :param image_location: "{ip}:{port}:{bucket name}:{key}", e.g.
            192.168.0.1:5443:region1.glance:001
        :return: (server_ip, port, bucket_name, key)
        :raises fc_exc.InvalidUdsImageInfo: on a malformed location
        """

        if image_location:
            uds_image_info = image_location.strip()
            str_array = re.split(":", uds_image_info)
            if len(str_array) == 4:
                return str_array[0], \
                    str_array[1], \
                    str_array[2], \
                    str_array[3]
        reason = _("Invalid uds image info,invalid image_location!")
        raise fc_exc.InvalidUdsImageInfo(reason=reason)

    def check_input(self):
        """Base checks plus: parse the UDS location and require the
        UDS access/secret keys to be configured.

        :raises fc_exc.InvalidUdsImageInfo: bad location or missing keys
        """
        super(VmCreateWithUdsImage, self).check_input()

        properties = self._image_meta.properties
        if properties:
            try:
                self.usd_image_server_ip, \
                    self.usd_image_port, \
                    self.usd_image_bucket_name, \
                    self.usd_image_key = \
                    self._get_uds_image_info(
                        properties.get(constant.HUAWEI_IMAGE_LOCATION))
            except Exception:
                reason = _("Invalid uds image info,invalid location!")
                raise fc_exc.InvalidUdsImageInfo(reason=reason)

        # BUG FIX: the original used ``is ''`` — an identity comparison
        # that only works by CPython string-interning accident. Use
        # equality to test for an empty configured value.
        if constant.CONF.fusioncompute.uds_access_key == '' \
                or constant.CONF.fusioncompute.uds_secret_key == '':
            reason = _("Invalid uds image info,invalid AK SK!")
            raise fc_exc.InvalidUdsImageInfo(reason=reason)

    def get_protocol(self):
        """Import protocol for this source: uds."""
        return "uds"

    def get_body_ext(self):
        """Extend the create body with the s3Config block (endpoint,
        credentials, bucket and object key) FC needs to fetch the image.

        :return: None
        """
        super(VmCreateWithUdsImage, self).get_body_ext()
        self._vm_create_body['s3Config'] = {
            'serverIp': self.usd_image_server_ip,
            'port': self.usd_image_port,
            'accessKey': constant.CONF.fusioncompute.uds_access_key,
            'secretKey': constant.CONF.fusioncompute.uds_secret_key,
            'bucketName': self.usd_image_bucket_name,
            'key': self.usd_image_key
        }
|
||||
|
||||
|
||||
class VmCreateWithGlanceImage(VmCreateWithImage):
    """Create a VM from a glance image."""

    def check_input(self):
        """Base checks plus: a glance server IP must be configured.

        :raises fc_exc.InvalidGlanceImageInfo: when the IP is empty
        """
        super(VmCreateWithGlanceImage, self).check_input()

        # BUG FIX: the original used ``is ''`` — an identity comparison
        # that only works by CPython string-interning accident. Use
        # equality to test for an empty configured value.
        if constant.CONF.fusioncompute.glance_server_ip == '':
            reason = _("Invalid glance image info,invalid server ip!")
            raise fc_exc.InvalidGlanceImageInfo(reason=reason)

    def get_body_ext(self):
        """Extend the create body with the glanceConfig block (endpoint,
        server IP, auth token and image id) FC needs to fetch the image.

        :return: None
        """
        super(VmCreateWithGlanceImage, self).get_body_ext()
        self._vm_create_body['glanceConfig'] = {
            'endPoint': ':'.join([str(constant.CONF.fusioncompute.host),
                                  str(constant.CONF.fusioncompute.port)]),
            'serverIp': constant.CONF.fusioncompute.glance_server_ip,
            'token': self._context.auth_token,
            'imageID': self._image_meta.id
        }
|
||||
|
||||
|
||||
class VmCreateByClone(VmCreateBase):
    """Create a VM by cloning an existing FC VM or template.

    FC clones the source's system volume itself; user volumes from the
    block device mapping are attached afterwards while the clone is
    still stopped.
    """

    def __init__(self, fc_client, task_ops, instance):
        super(VmCreateByClone, self).__init__(fc_client, task_ops, instance)
        # set by is_auto_boot() when user volumes must be attached
        self._need_attach_user_vols = False
        # source VM/template resolved by subclasses (e.g. from the
        # image's template-location property)
        self._cloned_source_vm_or_tpl = None

    def is_auto_boot(self):
        """Clones with user volumes must stay stopped so the volumes
        can be attached first.

        :return: False when user volumes are pending, else the base
            decision
        """
        if len(self._block_device_info.get('block_device_mapping')):
            self._need_attach_user_vols = True
            return False
        else:
            return super(VmCreateByClone, self).is_auto_boot()

    def get_os_options(self):
        """Resolve OS options for clone; currently the generic default
        (the image-property lookup is intentionally disabled below).
        """

        # get os Type from mata
        # os_type = self.image_properties.get(constant.HUAWEI_OS_TYPE)
        # os_version = self.image_properties.get(constant.HUAWEI_OS_VERSION)
        # if os_type:
        # return self.get_fc_os_options(os_type, os_version)
        # else:
        return super(VmCreateByClone, self).get_os_options()

    def get_disks_info(self):
        """Only the system volume spec is supplied.

        FC itself will clone disks belonging to this tpl/vm (it should
        and must have only one sys volume).

        :return: single-element list with the sys-vol dict
        """
        LOG.debug(_('prepare volume'))
        disks_info = []
        disk_sequence = 1

        # sys vol info
        sys_disk_info = {
            'sequenceNum': disk_sequence,
            'quantityGB': self._instance['root_gb'],
            'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
        }
        disks_info.append(sys_disk_info)

        return disks_info

    def get_body_ext(self):
        """Clone API names the uuid field differently: move 'uuid' to
        'clonedVmUUID'.

        :return: None
        """
        if "uuid" in self._vm_create_body:
            self._vm_create_body.pop("uuid")
        self._vm_create_body["clonedVmUUID"] = self._instance['uuid']

    def extend_ops_before_start(self):
        """User volumes are attached here, while the clone is stopped.

        :return: None
        """
        if self._need_attach_user_vols:
            self._attach_user_vols()

    def _attach_user_vols(self):
        """Attach every user volume from the block device mapping to
        the freshly cloned VM.

        :return: None
        """
        fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
        for disk in self._volume_ops.ensure_volume(self._block_device_info):
            body = {
                'volUrn': disk['urn'],
                'sequenceNum': self._compute_ops.get_sequence_num(
                    disk['urn'],
                    disk['mount_device'])}
            LOG.debug(_("begin attach user vol: %s"), disk['urn'])
            self._volume_ops.attach_volume(fc_vm, vol_config=body)

    def create_vm(self):
        """POST the clone request against the resolved source VM/template.

        :raises fc_exc.InstanceCloneFailure: on FC failure
        """
        self.post(self._cloned_source_vm_or_tpl.get_vm_action_uri('clone'),
                  data=self._vm_create_body,
                  excp=fc_exc.InstanceCloneFailure)
|
||||
|
||||
|
||||
class VmCreateWithTemplate(VmCreateByClone):
    """Create a VM by cloning a FusionCompute template."""

    def check_input(self):
        """Base checks plus: resolve and validate the source template
        named in the image's location property.

        :raises fc_exc.InstanceCloneFailure: on any lookup/validation
            problem
        """
        super(VmCreateWithTemplate, self).check_input()

        properties = self._image_meta.properties
        if properties:
            try:
                self._cloned_source_vm_or_tpl = \
                    self._get_vm_by_template_url(
                        properties.get(constant.HUAWEI_IMAGE_LOCATION))
                self._validate_template(self._cloned_source_vm_or_tpl)
            except Exception:
                LOG.error(_("Invalid FusionCompute template !"))
                raise fc_exc.InstanceCloneFailure

    def get_body_ext(self):
        """Extend the clone body: mark the result as a VM (not a
        template) and pass through the link-clone flag from metadata.

        :return: None
        """
        super(VmCreateWithTemplate, self).get_body_ext()
        self._vm_create_body['isTemplate'] = False

        is_link_clone = self._metadata.get(constant.HUAWEI_IS_LINK_CLONE)
        if is_link_clone:
            self._vm_create_body['isLinkClone'] = is_link_clone

    def _get_vm_by_template_url(self, template_url):
        """Look up the FC VM object named by a template url.

        :param template_url: "{vrm site id}:{vm id}", e.g.
            239d8a8e:i-00000061
        :return: the FC VM, or None when the url is malformed
        """

        vm_id = None
        if template_url:
            url = template_url.strip()
            str_array = re.split(":", url)
            if len(str_array) == 2:
                vm_id = str_array[1]

        if vm_id is not None:
            return FC_MGR.get_vm_by_id(vm_id)
        return None

    def _validate_template(self, instance):
        """Ensure the clone source is a real template with only a
        system disk.

        :param instance: fc vm, or None when lookup failed
        :raises fc_exc.InstanceCloneFailure: when the source is missing,
            is not a template, or carries extra disks
        """
        # BUG FIX: the original skipped the raise when instance was None
        # and then crashed with TypeError on instance['vmConfig'].
        # Treat a missing source as a clone failure explicitly.
        if instance is None or instance.isTemplate is not True:
            raise fc_exc.InstanceCloneFailure

        for disk in instance['vmConfig']['disks']:
            if disk['sequenceNum'] not in [0, 1]:
                raise fc_exc.InstanceCloneFailure
|
||||
|
||||
|
||||
def get_vm_create(fc_client, task_ops, instance, image_meta=None):
    """Factory for the VM-creation strategy object.

    NOTE(review): image_meta is currently ignored and the glance-image
    strategy is always selected — presumably the other strategies
    (volume/nfs/uds/template) are to be wired in here later; confirm.

    :return: a VmCreateWithGlanceImage instance
    """
    vm_class = VmCreateWithGlanceImage

    return vm_class(fc_client, task_ops, instance)
|
|
@ -0,0 +1,392 @@
|
|||
# Copyright 2016 Huawei Technologies Co.,LTD.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova.i18n import _
|
||||
from nova import utils as nova_utils
|
||||
from nova.virt.fusioncomputeapi import constant
|
||||
from nova.virt.fusioncomputeapi import exception as fc_exc
|
||||
from nova.virt.fusioncomputeapi import ops_task_base
|
||||
from nova.virt.fusioncomputeapi import utils
|
||||
from nova.virt.fusioncomputeapi.utils import LOG
|
||||
from nova.volume import cinder
|
||||
|
||||
|
||||
class VolumeOps(ops_task_base.OpsTaskBase):
|
||||
"""volume operation class
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, fc_client, task_ops):
    """Initialize volume operations with an FC client and task helper."""
    super(VolumeOps, self).__init__(fc_client, task_ops)
    # cinder client, used only to look up volume metadata
    self._volume_api = cinder.API()
|
||||
|
||||
def get_block_device_meta_data(self, context, block_device_info):
    """Fetch cinder metadata for the first mapped volume.

    :param context: request context for the cinder call
    :param block_device_info: nova block device info dict
    :return: cinder volume dict for the first mapping, or None when
        there are no mappings
    """
    LOG.debug('volume info is: %s', block_device_info)
    if len(block_device_info['block_device_mapping']) > 0:
        # only the first (boot) volume's metadata is of interest here
        volume_info = block_device_info['block_device_mapping'][0]
        volume_id = volume_info['connection_info']['serial']
        return self._get_volume_meta_data(context, volume_id)
    return None
|
||||
|
||||
def ensure_volume(self, volume_info):
    """Ensure volume resource on FC.

    Extracts, for each block device mapping entry, the FC volume urn
    and its guest mount device.

    :param volume_info: nova block_device_info dict
    :return: list of {'urn': ..., 'mount_device': ...} dicts
    """
    LOG.debug('volume info is: %s', volume_info)

    volumes = []
    for bdm in volume_info['block_device_mapping']:
        volumes.append({
            'urn': bdm['connection_info']['vol_urn'],
            'mount_device': bdm['mount_device']
        })
    return volumes
|
||||
|
||||
def expand_volume(self, fc_vm, vol_config):
    """Expand sys volume.

    :param fc_vm: FC instance
    :param vol_config: FC expandvol request body
    :return: FC response from the expandvol action
    :raises fc_exc.InstanceExpandvolFailure: on FC failure
    """
    vm_expandvol_uri = fc_vm.get_vm_action_uri('expandvol')
    return self.post(vm_expandvol_uri, data=vol_config,
                     excp=fc_exc.InstanceExpandvolFailure)
|
||||
|
||||
def attach_volume(self, fc_vm, vol_config):
    """Attach volume for vm.

    :param fc_vm: FC instance
    :param vol_config: FC attachvol request body (volUrn, sequenceNum)
    :return: None
    :raises fc_exc.InstanceAttachvolFailure: on FC failure
    """
    vm_attachvol_uri = fc_vm.get_vm_action_uri('attachvol')
    self.post(vm_attachvol_uri, data=vol_config,
              excp=fc_exc.InstanceAttachvolFailure)
|
||||
|
||||
def detach_volume(self, fc_vm, vol_config, is_snapshot_del=True):
    """Detach volume for vm.

    When snapshot auto-deletion is enabled, related snapshots are
    cleaned up first (under a per-VM lock) so FC allows the detach.

    :param fc_vm: FC instance
    :param vol_config: FC detachvol request body (volUrn, ...)
    :param is_snapshot_del: allow the snapshot pre-cleanup step
    :return: None
    :raises fc_exc.InstanceDetachvolFailure: on FC failure
    """

    if constant.CONF.fusioncompute.enable_snapshot_auto_del \
            and is_snapshot_del:
        snapshot_lock = "%s_snapshot" % fc_vm.uuid
        # NOTE(review): pre_detach_volume is defined outside this view —
        # presumably it deletes snapshots referencing the volume; confirm.
        self.pre_detach_volume(snapshot_lock, fc_vm.uri,
                               vol_config.get('volUrn'))

    vm_detachvol_uri = fc_vm.get_vm_action_uri('detachvol')
    self.post(vm_detachvol_uri, data=vol_config,
              excp=fc_exc.InstanceDetachvolFailure)
|
||||
|
||||
def create_local_disk_batch(self, **kwargs):
    """Create local disks for a VM in one batch call to FC.

    :param kwargs: expects cluster_urn, local_disk_count,
        local_disk_size, local_disk_type, local_disk_safe_format,
        volume_urns and fc_vm_urn.
    :return: urn of the FC batch-create task (may be None)
    """

    uri = self.site.volume_uri + '/createinbatch'

    # BUG FIX: the original tested ``safe_format.lower == 'true'`` —
    # comparing the bound method object itself (never its result), so
    # safeFormat was always False. Call the method; go through str()
    # so a non-string config value cannot raise AttributeError.
    safe_format = kwargs.get('local_disk_safe_format')
    safe_format = bool(safe_format and str(safe_format).lower() == 'true')

    body = {
        'clusterUrn': kwargs.get('cluster_urn'),
        'numberOfVolumes': kwargs.get('local_disk_count'),
        'volumeSize': kwargs.get('local_disk_size'),
        'type': kwargs.get('local_disk_type'),
        'safeFormat': safe_format,
        'volumeUrns': kwargs.get('volume_urns'),
        'vmUrn': kwargs.get('fc_vm_urn'),
        'datastoreUsageMode': 0}

    response = self.post(uri, body)

    return response.get('urn')
|
||||
|
||||
def delete_volume(self, vol_uri):
    """Delete volume.

    :param vol_uri: FC uri of the volume to delete
    :return: None
    :raises fc_exc.VolumeDeleteFailure: on FC failure
    """
    self.delete(vol_uri, excp=fc_exc.VolumeDeleteFailure)
|
||||
|
||||
def create_image_from_volume(self, vol_uri, vol, image_id):
    """Export a volume to an image via FC's volumetoimage action.

    :param vol_uri: volume action uri
    :param vol: dict with 'quantityGB' and 'volumeUrn'
    :param image_id: target image id; stored at the configured
        fc_image_path
    :return: None
    :raises fc_exc.ImageCreateFailure: on FC failure
    """
    body = {
        'volumePara': {
            'quantityGB': vol.get('quantityGB'),
            'urn': vol.get('volumeUrn')
        },
        'imagePara': {
            'id': image_id,
            'url': constant.CONF.fusioncompute.fc_image_path
        }
    }

    image_create_uri = vol_uri + '/volumetoimage'
    self.post(image_create_uri, data=body, excp=fc_exc.ImageCreateFailure)
|
||||
|
||||
def _get_volume_meta_data(self, context, volume_id):
    """Fetch a volume's metadata from the Cinder volume API.

    :param context: request context forwarded to the volume API
    :param volume_id: id of the volume to look up
    :return: the volume object/dict returned by ``self._volume_api.get``
    """
    LOG.debug(_('get_volume_meta_data enter, volume_id:%s.'), volume_id)
    volume_api = self._volume_api
    return volume_api.get(context, volume_id)
|
||||
|
||||
def set_qos_specs_to_volume(self, info):
    """Push cinder QoS specs onto the FC volume(s) described by ``info``.

    :param info: either a block_device_info dict (recognised by the
        presence of a 'block_device_mapping' key, each entry providing a
        'connection_info') or a single connection_info dict.
    :return: None
    """

    def _set_qos_specs_to_volume(self, connection_info):
        """Apply the qos_specs of one connection_info to its FC volume.

        :param connection_info: dict with 'data' (containing
            'qos_specs') and 'vol_urn'; silently ignored when either
            piece is missing.
        :return: None
        """
        # FC semantics: 0 means "no limit"; these are the defaults
        # sent when no (or empty) specs are associated.
        qos_para = {'maxReadBytes': 0,
                    'maxWriteBytes': 0,
                    'maxReadRequest': 0,
                    'maxWriteRequest': 0}
        # cinder qos key -> FC API field name
        key_cvt_map = {'read_bytes_sec': 'maxReadBytes',
                       'write_bytes_sec': 'maxWriteBytes',
                       'read_iops_sec': 'maxReadRequest',
                       'write_iops_sec': 'maxWriteRequest'}
        # qos keys this driver honours; anything else is ignored
        tune_opts = ['read_bytes_sec', 'write_bytes_sec',
                     'read_iops_sec', 'write_iops_sec']
        # byte-valued keys that need byte -> KB conversion for FC
        tune_cvt_opts = ['read_bytes_sec', 'write_bytes_sec']
        # Extract rate_limit control parameters
        if connection_info is None or 'data' not in connection_info:
            return

        specs = connection_info['data']['qos_specs']
        vol_urn = connection_info.get('vol_urn')

        if vol_urn is None:
            return

        # because the volume can be detached and attached to another
        # instance, qos may be disassociated from the volume type
        # between those two operations, so if specs is None the
        # zero ("no limit") defaults above are sent to FC.
        if specs is not None:
            if isinstance(specs, dict):
                # NOTE: Python 2 only (dict.iteritems, long) — confirm
                # before any py3 migration.
                for key, value in specs.iteritems():
                    if key in tune_opts:
                        # convert byte to KB for FC, 0 is no limit,
                        # the value is at least 1
                        output_value = value

                        if key in tune_cvt_opts:
                            addition = 0
                            # only convert pure-digit strings; anything
                            # else is passed through unchanged
                            if output_value.isdigit():
                                # round up: any remainder bumps the
                                # result to the next whole KB
                                if long(value) % 1024 != 0:
                                    addition = 1
                                output_value = long(value) / 1024 \
                                    + addition

                        qos_para[key_cvt_map[key]] = output_value
            else:
                # unexpected specs type: log and send nothing at all
                LOG.debug(_('Unknown content in connection_info '
                            'qos_specs: %s'), specs)
                return

        qos_specs_uri = utils.generate_uri_from_urn(vol_urn) \
            + constant.VOL_URI_MAP['modio']

        # Send Qos IO Specs to VRM with put method
        self.put(qos_specs_uri, data=qos_para,
                 excp=fc_exc.SetQosIoFailure, fixedInterval=1)

    if isinstance(info, dict):
        # input para is block_device_info
        if 'block_device_mapping' in info:
            block_device_mapping = info.get('block_device_mapping', [])
            for vol in block_device_mapping:
                connection_info = vol['connection_info']
                _set_qos_specs_to_volume(self, connection_info)
        # input para is connection_info
        else:
            _set_qos_specs_to_volume(self, info)
|
||||
|
||||
def query_vm_snapshot(self, instance_url):
    """Query all snapshots of a VM and flatten them into a list.

    Walks the snapshot tree returned by FC depth-first and records, for
    every snapshot, its name/uri/status/type plus the urns of the
    volumes it references.

    :param instance_url: FC uri of the instance
    :return: list of snapshot dicts (each with 'name', 'uri', 'status',
        'type', 'volumeUriList'), or None when the VM reports no
        rootSnapshots
    :raises fc_exc.InvalidSnapshotInfo: when a snapshot query fails for
        a reason other than FC error 10300109 ("snapshot not found")
    """

    def _route_all_snapshots(snapshot, snapshot_list):
        """Depth-first walk; append each snapshot node to snapshot_list."""
        # safety cap: never collect more than ~32 snapshots
        if len(snapshot_list) > 32 or \
                isinstance(snapshot, dict) is False:
            return

        child_snapshots = snapshot.get('childSnapshots')
        # BUG FIX: the original tested ``isinstance(snapshots, list)``
        # — the *outer* accumulator, which is always a list — so a
        # snapshot without a childSnapshots list fell through to the
        # for-loop and raised TypeError. Recurse only when children
        # actually exist, and still record the node itself below.
        if isinstance(child_snapshots, list):
            for child_snap in child_snapshots:
                _route_all_snapshots(child_snap, snapshot_list)

        node = {}
        node['name'] = snapshot.get('name')
        node['uri'] = snapshot.get('uri')
        node['status'] = snapshot.get('status')
        node['type'] = snapshot.get('type')
        snapshot_list.append(node)
        return

    def _query_snapshot_volumes(snapshot_url):
        """Return the volume urns referenced by one snapshot.

        :param snapshot_url: uri of the snapshot to query
        :return: list of volume urns (possibly empty)
        """
        try:
            rsp = self.get(snapshot_url)
        except Exception as e:
            # 10300109: snapshot no longer exists — treat as empty.
            # NOTE(review): ``> 0`` misses the code at position 0;
            # assumed FC error text always has a prefix — confirm.
            if e.message.find('10300109') > 0:
                rsp = {}
            else:
                msg = _('Query %s snapshot error') % snapshot_url
                raise fc_exc.InvalidSnapshotInfo(msg)

        volsnapshots = rsp.get('volsnapshots')
        if isinstance(volsnapshots, list) is False:
            LOG.info("snapshot not include any volume, %s" % rsp)
            return []
        # list comprehension instead of map(): callers later check
        # isinstance(..., list), which a py3 map object would fail
        return [x.get('volumeUrn') for x in volsnapshots]

    snapshot_url = '%s/snapshots' % instance_url
    try:
        rsp = self.get(snapshot_url)
    except Exception as e:
        # 10300109: VM has no snapshots — treat as empty response
        if e.message.find('10300109') > 0:
            rsp = {}
        else:
            msg = _('query %s snapshot error') % snapshot_url
            raise fc_exc.InvalidSnapshotInfo(msg)

    rootSnaps = rsp.get('rootSnapshots')
    if isinstance(rootSnaps, list) is False:
        return None

    snapshots = []
    for snap in rootSnaps:
        _route_all_snapshots(snap, snapshots)

    # attach the referenced volume urns to every collected snapshot
    for snap in snapshots:
        snapshot_volumes = _query_snapshot_volumes(snap.get('uri'))
        snap.update({'volumeUriList': snapshot_volumes})
    return snapshots
|
||||
|
||||
def need_del_backup_snapshots(self, snapshot_info_list, volume_urn):
    """Return the backup snapshots that reference the given volume.

    Only snapshots of type 'backup' or 'CBTbackup' in 'ready' state may
    be deleted before a detach; any other snapshot still referencing
    the volume is an error.

    :param snapshot_info_list: snapshot dicts as produced by
        query_vm_snapshot (each carrying 'volumeUriList', 'type',
        'status')
    :param volume_urn: urn of the volume being detached
    :return: list of snapshot dicts to delete first (possibly empty)
    :raises fc_exc.InvalidSnapshotInfo: when a non-backup or not-ready
        snapshot still references the volume
    """

    def _is_vol_in_snap(snapshot_info, volume_urn):
        # volumeUriList may be missing/None when the volume query failed
        snapshot_volume_list = snapshot_info.get('volumeUriList')
        if isinstance(snapshot_volume_list, list) is not True:
            return False
        return volume_urn in snapshot_volume_list

    # List comprehension instead of filter(): under py3 filter()
    # returns an iterator, which would break len() below.
    snapshots_with_volume = [snap for snap in snapshot_info_list
                             if _is_vol_in_snap(snap, volume_urn)]
    if len(snapshots_with_volume) == 0:
        LOG.info(
            "can't find volume %s in snapshot %s" %
            (volume_urn, snapshot_info_list))
        return []

    for snapshot in snapshots_with_volume:
        # renamed from ``type`` to avoid shadowing the builtin
        snap_type = snapshot.get('type')
        status = snapshot.get('status')
        if (snap_type != 'backup' and snap_type != 'CBTbackup') \
                or status != 'ready':
            msg = _('snapshot is %s ') % snap_type
            LOG.info(msg)
            raise fc_exc.InvalidSnapshotInfo(msg)
    return snapshots_with_volume
|
||||
|
||||
def pre_detach_volume(self, snapshot_lock, instance_url, volume_url):
    """Delete backup snapshots referencing a volume before detaching it.

    :param snapshot_lock: name of the per-VM lock serializing snapshot
        operations (see nova_utils.synchronized)
    :param instance_url: FC uri of the instance owning the volume
    :param volume_url: FC uri/urn of the volume about to be detached
    :return: return value of the locked helper (None)
    """
    def _def_vm_snapshot(snapshot_url):
        # Delete one snapshot, tolerating "already deleted"
        # (FC error code 10300109 in the message text).
        # NOTE(review): ``e.message`` is Python 2 only, and
        # ``find(...) > 0`` misses a code at position 0 — confirm.
        try:
            self.delete(snapshot_url)
        except Exception as e:
            if e.message.find('10300109') > 0:
                LOG.warn("snapshot %s has been deleted" % snapshot_url)
                pass
            else:
                msg = _('del %s snapshot error') % snapshot_url
                raise fc_exc.InvalidSnapshotInfo(msg)

    # The lock name is bound at decoration time, so concurrent snapshot
    # operations on the same VM are serialized.
    @nova_utils.synchronized(snapshot_lock)
    def _do_pre_detach_volume(instance_url, volume_url):
        # collect all snapshots, pick the backup ones that reference
        # this volume, and delete them before the detach proceeds
        snap_infos = self.query_vm_snapshot(instance_url)
        need_del_snap = self.need_del_backup_snapshots(
            snap_infos, volume_url)
        for snap in need_del_snap:
            _def_vm_snapshot(snap.get('uri'))

    return _do_pre_detach_volume(instance_url, volume_url)
|
||||
|
||||
def query_volume(self, **kwargs):
    """Look up a single volume by id.

    :param kwargs: must contain 'id' — the FC volume id to fetch
    :return: parsed response of ``GET <site volume uri>/<id>``
    """
    LOG.debug(_("[VRM-CINDER] start query_volume()"))

    volume_id = kwargs.get('id')
    request_uri = '%s/%s' % (self.site.volume_uri, volume_id)
    return self.get(request_uri)
|
|
@ -0,0 +1,275 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# nova-fusioncompute Release Notes documentation build configuration file, created by
|
||||
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'oslosphinx',
|
||||
'reno.sphinxext',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
# source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'nova-fusioncompute Release Notes'
|
||||
copyright = u'2016, OpenStack Foundation'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = ''
|
||||
# The short X.Y version.
|
||||
version = ''
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
# language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
# today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
# today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = []
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
# default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
# add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
# add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
# show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
# modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
# keep_warnings = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'default'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
# html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
# html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
# html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
# html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
# html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
# html_extra_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
# html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
# html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
# html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
# html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
# html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
# html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
# html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
# html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
# html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
# html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
# html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'GlanceReleaseNotesdoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
# 'preamble': '',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
    # project name fixed — was a copy-paste leftover from Glance
    ('index', 'NovaFusionComputeReleaseNotes.tex',
     u'nova-fusioncompute Release Notes Documentation',
     u'nova-fusioncompute Developers', 'manual'),
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
# latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
# latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
# latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
# latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
# latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
# latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
    # project name fixed — was a copy-paste leftover from Glance
    ('index', 'novafusioncomputereleasenotes',
     u'nova-fusioncompute Release Notes Documentation',
     [u'nova-fusioncompute Developers'], 1)
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
# man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
    # project name fixed — was a copy-paste leftover from Glance
    ('index', 'NovaFusionComputeReleaseNotes',
     u'nova-fusioncompute Release Notes Documentation',
     u'nova-fusioncompute Developers', 'NovaFusionComputeReleaseNotes',
     'Release notes for the nova-fusioncompute driver.',
     'Miscellaneous'),
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
# texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
# texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
# texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
# texinfo_no_detailmenu = False
|
||||
|
||||
# -- Options for Internationalization output ------------------------------
|
||||
locale_dirs = ['locale/']
|
|
@ -0,0 +1,8 @@
|
|||
============================================
|
||||
nova-fusioncompute Release Notes
|
||||
============================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
unreleased
|
|
@ -0,0 +1,5 @@
|
|||
==============================
|
||||
Current Series Release Notes
|
||||
==============================
|
||||
|
||||
.. release-notes::
|
|
@ -0,0 +1,18 @@
|
|||
# The order of packages is significant, because pip processes them in the order
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
|
||||
pbr>=1.6 # Apache-2.0
|
||||
oslo.config>=3.14.0 # Apache-2.0
|
||||
oslo.cache>=1.5.0 # Apache-2.0
|
||||
oslo.concurrency>=3.8.0 # Apache-2.0
|
||||
oslo.context>=2.9.0 # Apache-2.0
|
||||
oslo.log>=3.11.0 # Apache-2.0
|
||||
oslo.reports>=0.6.0 # Apache-2.0
|
||||
oslo.serialization>=1.10.0 # Apache-2.0
|
||||
oslo.db!=4.13.1,!=4.13.2,>=4.10.0 # Apache-2.0
|
||||
oslo.rootwrap>=5.0.0 # Apache-2.0
|
||||
oslo.messaging>=5.2.0 # Apache-2.0
|
||||
oslo.privsep>=1.9.0 # Apache-2.0
|
||||
oslo.i18n>=2.1.0 # Apache-2.0
|
||||
oslo.service>=1.10.0 # Apache-2.0
|
|
@ -0,0 +1,51 @@
|
|||
[metadata]
|
||||
name = nova-fusioncompute
|
||||
summary = nova-fusioncompute is Huawei FusionCompute[1] virtualization driver for OpenStack Nova
|
||||
description-file =
|
||||
README.rst
|
||||
author = OpenStack
|
||||
author-email = openstack-dev@lists.openstack.org
|
||||
home-page = http://www.openstack.org/
|
||||
classifier =
|
||||
Environment :: OpenStack
|
||||
Intended Audience :: Information Technology
|
||||
Intended Audience :: System Administrators
|
||||
License :: OSI Approved :: Apache Software License
|
||||
Operating System :: POSIX :: Linux
|
||||
Programming Language :: Python
|
||||
Programming Language :: Python :: 2
|
||||
Programming Language :: Python :: 2.7
|
||||
Programming Language :: Python :: 3
|
||||
Programming Language :: Python :: 3.3
|
||||
Programming Language :: Python :: 3.4
|
||||
|
||||
[files]
|
||||
packages =
|
||||
nova-fusioncompute
|
||||
|
||||
[build_sphinx]
|
||||
source-dir = doc/source
|
||||
build-dir = doc/build
|
||||
all_files = 1
|
||||
|
||||
[upload_sphinx]
|
||||
upload-dir = doc/build/html
|
||||
|
||||
[compile_catalog]
|
||||
directory = nova-fusioncompute/locale
|
||||
domain = nova-fusioncompute
|
||||
|
||||
[update_catalog]
|
||||
domain = nova-fusioncompute
|
||||
output_dir = nova-fusioncompute/locale
|
||||
input_file = nova-fusioncompute/locale/nova-fusioncompute.pot
|
||||
|
||||
[extract_messages]
|
||||
keywords = _ gettext ngettext l_ lazy_gettext
|
||||
mapping_file = babel.cfg
|
||||
output_file = nova-fusioncompute/locale/nova-fusioncompute.pot
|
||||
|
||||
[build_releasenotes]
|
||||
all_files = 1
|
||||
build-dir = releasenotes/build
|
||||
source-dir = releasenotes/source
|
|
@ -0,0 +1,29 @@
|
|||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
|
||||
import setuptools
|
||||
|
||||
# In python < 2.7.4, a lazy loading of package `pbr` will break
|
||||
# setuptools if some other modules registered functions in `atexit`.
|
||||
# solution from: http://bugs.python.org/issue15881#msg170215
|
||||
try:
|
||||
import multiprocessing # noqa
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
setuptools.setup(
|
||||
setup_requires=['pbr'],
|
||||
pbr=True)
|
|
@ -0,0 +1,17 @@
|
|||
# The order of packages is significant, because pip processes them in the order
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
|
||||
hacking>=0.11.0,<0.12 # Apache-2.0
|
||||
|
||||
coverage>=3.6 # Apache-2.0
|
||||
python-subunit>=0.0.18 # Apache-2.0/BSD
|
||||
sphinx>=1.2.1,!=1.3b1,<1.4 # BSD
|
||||
oslosphinx>=4.7.0 # Apache-2.0
|
||||
oslotest>=1.10.0 # Apache-2.0
|
||||
testrepository>=0.0.18 # Apache-2.0/BSD
|
||||
testscenarios>=0.4 # Apache-2.0/BSD
|
||||
testtools>=1.4.0 # MIT
|
||||
|
||||
# releasenotes
|
||||
reno>=1.8.0 # Apache2
|
|
@ -0,0 +1,40 @@
|
|||
[tox]
|
||||
minversion = 2.0
|
||||
envlist = py34,py27,pypy,pep8
|
||||
skipsdist = True
|
||||
|
||||
[testenv]
|
||||
usedevelop = True
|
||||
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
|
||||
setenv =
|
||||
VIRTUAL_ENV={envdir}
|
||||
PYTHONWARNINGS=default::DeprecationWarning
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
commands = python setup.py test --slowest --testr-args='{posargs}'
|
||||
|
||||
[testenv:pep8]
|
||||
commands = flake8 {posargs}
|
||||
|
||||
[testenv:venv]
|
||||
commands = {posargs}
|
||||
|
||||
[testenv:cover]
|
||||
commands = python setup.py test --coverage --testr-args='{posargs}'
|
||||
|
||||
[testenv:docs]
|
||||
commands = python setup.py build_sphinx
|
||||
|
||||
[testenv:releasenotes]
|
||||
commands =
|
||||
sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
|
||||
|
||||
[testenv:debug]
|
||||
commands = oslo_debug_helper {posargs}
|
||||
|
||||
[flake8]
|
||||
# E123, E125 skipped as they are invalid PEP-8.
|
||||
|
||||
show-source = True
|
||||
ignore = E123,E125,N342,H104,W391
|
||||
builtins = _
|
||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
|
Loading…
Reference in New Issue