diff --git a/.gitignore b/.gitignore
index 74b8feb0..95e265fc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,3 +53,12 @@ ChangeLog
 # Local data to test web-served html pages
 web/share/elastic-recheck
 web/share/data
+
+# logs
+data/*.log
+data/id_ecdsa
+data/id_ecdsa.pub
+
+# local config
+
+local/
\ No newline at end of file
diff --git a/.zuul.yaml b/.zuul.yaml
index a5b9e4f3..aed3615c 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -28,6 +28,7 @@
 
 - job:
     name: elastic-recheck-container
+    voting: false
     parent: opendev-build-docker-image
     description: Build container images for elastic-recheck service
     vars:
diff --git a/Dockerfile b/Dockerfile
index a790026a..cbc50120 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,18 +1,65 @@
 # syntax=docker/dockerfile:experimental
-FROM opendevorg/python-builder:3.7 as elastic-recheck-builder
+# We make use of alpine as it seems to have a more container-friendly cron;
+# later alpine versions require a different nginx configuration.
+FROM alpine:3.13 as elastic-recheck
+# FROM opendevorg/python-builder:3.7 as elastic-recheck-builder
+
+RUN \
+    apk update && \
+    apk add \
+    g++ \
+    gcc \
+    git \
+    musl-dev \
+    nginx \
+    python3-dev \
+    py3-argparse \
+    py3-babel \
+    py3-certifi \
+    py3-cffi \
+    py3-cryptography \
+    py3-distro \
+    py3-elasticsearch \
+    py3-httplib2 \
+    py3-jinja2 \
+    py3-netaddr \
+    py3-netifaces \
+    py3-oauthlib \
+    py3-paramiko \
+    py3-pbr \
+    py3-pip \
+    py3-requests \
+    py3-simplejson \
+    py3-sqlalchemy \
+    py3-tempita \
+    py3-tz \
+    py3-yaml \
+&& ln -f -s /data/cron/crontab /etc/crontabs/root
+
+WORKDIR /tmp/src
 COPY . /tmp/src
-RUN assemble
+RUN pip3 install .
 
-FROM opendevorg/python-base:3.7 as elastic-recheck
+# RUN assemble
+# FROM opendevorg/python-base:3.7 as elastic-recheck
+# COPY --from=elastic-recheck-builder /output/ /output
 
-COPY --from=elastic-recheck-builder /output/ /output
-RUN /output/install-from-bindep && \
-    rm -rf /output
-COPY data/ /data/
-COPY queries/ /opt/elastic-recheck/queries
+# RUN /output/install-from-bindep && \
+#     rm -rf /output && \
+RUN rm -rf /tmp/src && \
+    mkdir -p /root/.ssh /data /run/nginx && \
+    chmod 700 /root/.ssh
+
+COPY web/conf/nginx.conf /etc/nginx/conf.d/default.conf
+COPY web/share/ /var/www/localhost
+COPY data/cron/ /root/
+COPY data/elastic-recheck.conf /root/elastic-recheck.conf
+COPY data/recheckwatchbot.yaml /root/recheckwatchbot.yaml
+COPY tools/ssh-check.py /root/ssh-check.py
+# COPY data/crontab /var/spool/cron/crontabs/root
+COPY data/id_ecdsa /root/.ssh/id_ecdsa
 
 # using root allows us to use same relative paths in configs for running outside
 # containers, where ./data contains persistent configs and logs.
 WORKDIR /
-CMD /usr/local/bin/elastic-recheck -f data/elastic-recheck.conf ${ER_OPTS:-}
+CMD /usr/bin/elastic-recheck -f /root/elastic-recheck.conf ${ER_OPTS:-}
diff --git a/Makefile b/Makefile
index b923a6ec..a4a17d87 100644
--- a/Makefile
+++ b/Makefile
@@ -4,11 +4,16 @@ PYTHON ?= $(shell command -v python3 python|head -n1)
 # Keep docker before podman due to:
 # https://github.com/containers/podman/issues/7602
 ENGINE ?= $(shell command -v docker podman|head -n1)
+COMPOSE ?= $(shell command -v docker-compose podman-compose|head -n1)
+
 # localhost/ prefix must be present in order to assure docker/podman compatibility:
 # https://github.com/containers/buildah/issues/1034
 IMAGE_TAG=localhost/elastic-recheck
 # Enable newer docker buildkit if available
 DOCKER_BUILDKIT=1
+COMPOSE_DOCKER_CLI_BUILD=1
+# ssh opts to add, used only for testing
+SSH_OPTS=-o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null"
 
 .PHONY: default
 default: help
@@ -32,16 +37,66 @@ export PRINT_HELP_PYSCRIPT
 help:
 	@$(PYTHON) -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
 
+.PHONY: check-env
+check-env:
+ifndef GERRIT_USER
+	$(error GERRIT_USER is undefined, you need to define it to run this command)
+endif
+
 .PHONY: build
-build: ## Build image using docker
+build: data/id_ecdsa check-env ## Build image using $(ENGINE)
+	@echo "Checking that current user can connect to gerrit using ssh..."
+	@python3 ./tools/ssh-check.py
 	$(ENGINE) build -t $(IMAGE_TAG) .
-	@echo "Image size: $$(docker image inspect --format='scale=0; {{.Size}}/1024/1024' $(IMAGE_TAG) | bc)MB"
+	@echo "Image size: $$($(ENGINE) image inspect --format='scale=0; {{.Size}}/1024/1024' $(IMAGE_TAG) | bc)MB"
+	@echo "Validating that the built container can also connect to gerrit..."
+	$(ENGINE) run --env GERRIT_USER -it $(IMAGE_TAG) python3 /root/ssh-check.py
+
+.PHONY: up
+up: data/id_ecdsa check-env ## Run containers
+	@# validates that container has credentials and connectivity to talk with gerrit server
+	@# Validate the builder image can connect to server
+	@# $(ENGINE) run --env GERRIT_USER -it $(IMAGE_TAG) python3 /root/ssh-check.py
+	@# --abort-on-container-exit
+	$(COMPOSE) up --force-recreate --remove-orphans --abort-on-container-exit

[diff header lost in extraction; the following new message templates appear between the Makefile and docker-compose.yml hunks, and data/recheckwatchbot.yaml is the file the Dockerfile copies into /root/]

found_bug: |
  I noticed Zuul failed, I think you hit bug(s):

  %(bugs)s
footer: >-
  For more details on this and other bugs, please see
  http://ci-health-rdo.tripleo.org/
recheck_instructions: >-
  If you believe we've correctly identified the failure, feel free to leave a 'recheck'
  comment to run the tests again.
unrecognized: >-
  Some of the tests failed in a way that we did not understand. Please help
  us classify these issues so that they can be part of Elastic Recheck
  http://ci-health-rdo.tripleo.org/
no_bugs_found: >-
  I noticed Zuul failed, refer to:
  https://docs.openstack.org/infra/manual/developers.html#automated-testing
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 00000000..4e7d8059
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,83 @@
+version: '2'
+services:
+  cron:
+    container_name: er-cron
+    image: localhost/elastic-recheck
+    # the first time it starts we want to update as fast as possible, and
+    # switch to cron afterwards.
+    command: /root/cron-start.sh
+    environment:
+      - DB_URI
+      - ES_URL
+      - GERRIT_HOST
+      # - GERRIT_KEY
+      - GERRIT_USER=${GERRIT_USER}
+      - IRC_NICK
+      - IRC_PASS
+      - LOG_CONFIG
+      - LS_URL
+    volumes:
+      - er-volume:/data
+  bot:
+    container_name: er-bot
+    image: localhost/elastic-recheck
+    working_dir: /root
+    command: /usr/bin/elastic-recheck --noirc -f elastic-recheck.conf
+    environment:
+      - DB_URI
+      - ES_URL
+      - GERRIT_HOST
+      # - GERRIT_KEY
+      - GERRIT_USER
+      - IRC_NICK
+      - IRC_PASS
+      - LOG_CONFIG
+      - LS_URL
+    volumes:
+      - er-volume:/data
+  web:
+    container_name: er-web
+    image: localhost/elastic-recheck
+    command: nginx -g 'daemon off;'
+    environment:
+      - DB_URI
+      - ES_URL
+      - GERRIT_HOST
+      # - GERRIT_KEY
+      - GERRIT_USER=${GERRIT_USER}
+      - IRC_NICK
+      - IRC_PASS
+      - LOG_CONFIG
+      - LS_URL
+    ports:
+      - 80:80
+    # we do not want to start it too soon as it may fail to start if
+    # the er-volume is empty
+    depends_on:
+      - cron
+      - bot
+    volumes:
+      - er-volume:/data
+    labels:
+      - traefik.enable=true
+      - traefik.port=80
+      - traefik.http.routers.er.rule=Host(`er.sbarnea.com`)
+      - traefik.http.routers.er.tls.certResolver=myresolver
+      - traefik.http.routers.er.entrypoints=websecure
+      - traefik.http.services.er.loadbalancer.server.port=80
+
+  # do not mention network_mode or we risk getting:
+  # conflicting options: host type networking can't be used with links
+  # network_mode: host
+# networks:
+#   hostnet: {}
+
+volumes:
+  er-volume:
+    name: er-volume
+    driver: local
+
+# # networks:
+# #   hostnet:
+# #     external: true
+# #     name: host
diff --git a/elasticRecheck.conf.sample b/elasticRecheck.conf.sample
index 35dcbbd8..390dc484 100644
--- a/elasticRecheck.conf.sample
+++ b/elasticRecheck.conf.sample
@@ -4,7 +4,7 @@ nick=RecheckWatchBot
 pass=
 server=irc.freenode.net
 port=6667
-channel_config=/home/mtreinish/elasticRecheck/recheckwatchbot.yaml
+channel_config=data/recheckwatchbot.yaml
 
 [recheckwatch]
 #Any project that has a job that matches this regex will have all their
@@ -13,10 +13,10 @@ jobs_re=dsvm
 ci_username=jenkins
 
 [gerrit]
-user=treinish
+user=os-tripleo-ci
 host=review.opendev.org
-query_file=/home/mtreinish/elasticRecheck/queries
-key=/home/mtreinish/.ssh/id_rsa
+query_file=/opt/elastic-recheck/queries
+key=/root/.ssh/id_ecdsa
 
 [data_source]
 es_url=http://logstash.openstack.org:80/elasticsearch
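For reference, the rewritten sample config can be exercised with nothing more than the stdlib. A minimal sketch (not the project's Config class; RawConfigParser is used here to sidestep %-interpolation, and the file is assumed to be in the working directory):

```python
# Minimal sketch: read elasticRecheck.conf.sample and confirm the new
# container-friendly paths resolve.
import configparser

cfg = configparser.RawConfigParser()
cfg.read("elasticRecheck.conf.sample")

print(cfg.get("ircbot", "channel_config"))  # data/recheckwatchbot.yaml
print(cfg.get("gerrit", "user"))            # os-tripleo-ci
print(cfg.get("gerrit", "key"))             # /root/.ssh/id_ecdsa
```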
diff --git a/elastic_recheck/bot.py b/elastic_recheck/bot.py
index 0a4d65ed..126cc174 100755
--- a/elastic_recheck/bot.py
+++ b/elastic_recheck/bot.py
@@ -209,16 +209,16 @@ class RecheckWatch(threading.Thread):
                 if not event.get_all_bugs():
                     self._read(event)
                 else:
-                    self._read(event)
                     stream.leave_comment(
                         event,
                         self.msgs,
                         debug=not self.commenting)
             except er.ResultTimedOut as e:
-                self.log.warning(e.message)
-                self._read(msg=e.message)
-            except Exception:
-                self.log.exception("Uncaught exception processing event.")
+                self.log.warning(e.args[0])
+                self._read(msg=e.args[0])
+            except Exception as exp:
+                self.log.exception("Uncaught exception processing event: %s",
+                                   str(exp))
 
 
 class MessageConfig(dict):
@@ -237,7 +237,7 @@ class ChannelConfig(object):
         self.data = data
 
         keys = data.keys()
-        for key in keys:
+        for key in list(keys):
             if key[0] != '#':
                 data['#' + key] = data.pop(key)
         self.channels = data.keys()
@@ -291,6 +291,7 @@ def _main(args, config):
         msgs = MessageConfig(yaml.safe_load(open(fp)))
 
     if not args.noirc:
+        print(dir(config))
         bot = RecheckWatchBot(
             channel_config.channels,
             config=config)
diff --git a/elastic_recheck/cmd/__init__.py b/elastic_recheck/cmd/__init__.py
deleted file mode 100644
index e69de29b..00000000
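The `list(keys)` change in ChannelConfig above is a Python 3 correctness fix: `dict.keys()` is a live view, and the loop pops and re-adds keys while walking it. A self-contained illustration (the channel names are made up):

```python
# On modern CPython, mutating a dict while iterating its live keys view
# raises RuntimeError; snapshotting with list() first is safe.
data = {"openstack-qa": 1, "#openstack-infra": 2}
try:
    for key in data.keys():
        if key[0] != '#':
            data['#' + key] = data.pop(key)
except RuntimeError as err:
    print("live-view iteration failed:", err)

data = {"openstack-qa": 1, "#openstack-infra": 2}
for key in list(data.keys()):  # snapshot, as the patched code does
    if key[0] != '#':
        data['#' + key] = data.pop(key)
print(sorted(data))  # ['#openstack-infra', '#openstack-qa']
```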
diff --git a/elastic_recheck/cmd/check_success.py b/elastic_recheck/cmd/check_success.py
index 04b612aa..b25102d9 100755
--- a/elastic_recheck/cmd/check_success.py
+++ b/elastic_recheck/cmd/check_success.py
@@ -52,7 +52,8 @@ def all_fails(classifier):
     so we can figure out how good we are doing on total classification.
     """
     all_fails = {}
-    results = classifier.hits_by_query(er_config.ALL_FAILS_QUERY, size=30000)
+    results = classifier.hits_by_query(er_config.ALL_FAILS_QUERY,
+                                       size=10000, days=7)
     facets = er_results.FacetSet()
     facets.detect_facets(results, ["build_uuid"])
     for build in facets:
@@ -165,7 +166,7 @@ def collect_metrics(classifier, fails):
     data = {}
     for q in classifier.queries:
         start = time.time()
-        results = classifier.hits_by_query(q['query'], size=30000)
+        results = classifier.hits_by_query(q['query'], size=10000, days=7)
         log = logging.getLogger('recheckwatchbot')
         log.debug("Took %d seconds to run (uncached) query for bug %s",
                   time.time() - start, q['bug'])
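Both call sites above shrink `size` from 30000 to 10000, presumably to stay under Elasticsearch's default `index.max_result_window` of 10000, and bound the search to a week. A hedged usage sketch (assumes a `queries.yaml` in the working directory and a reachable ES backend):

```python
from elastic_recheck import config as er_config
from elastic_recheck import elasticRecheck as er

classifier = er.Classifier("queries.yaml")
# size=10000 stays within ES's default index.max_result_window;
# days=7 restricts the search to a week of logstash-* indices.
results = classifier.hits_by_query(er_config.ALL_FAILS_QUERY,
                                   size=10000, days=7)
print(len(results), "failed builds in the last 7 days")
```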
diff --git a/elastic_recheck/cmd/graph.py b/elastic_recheck/cmd/graph.py
index e5d60705..9b49edac 100755
--- a/elastic_recheck/cmd/graph.py
+++ b/elastic_recheck/cmd/graph.py
@@ -18,9 +18,9 @@ import argparse
 from datetime import datetime
 import json
 import os
-
+from lazr.restfulclient.errors import ServerError
 from launchpadlib import launchpad
-import pyelasticsearch
+import elasticsearch
 import pytz
 import requests
 
@@ -71,6 +71,11 @@ def get_launchpad_bug(bug):
         LOG.exception("Failed to get Launchpad data for bug %s", bug)
         bugdata = dict(name='Unable to get launchpad data',
                        affects='Unknown', reviews=[])
+    # because for some reason launchpad returns 500 instead of 404
+    except ServerError:
+        LOG.exception("Failed to get Launchpad data for bug %s", bug)
+        bugdata = dict(name='Unable to get launchpad data',
+                       affects='Unknown', reviews=[])
     return bugdata
 
@@ -149,7 +154,8 @@ def main():
     timeframe = days * 24 * STEP / 1000
 
     last_indexed = int(
-        ((classifier.most_recent() - epoch).total_seconds()) * 1000)
+        ((classifier.most_recent().replace(tzinfo=pytz.utc)
+          - epoch).total_seconds()) * 1000)
     behind = now - last_indexed
 
     # the data we're going to return, including interesting headers
@@ -161,9 +167,6 @@ def main():
     }
 
     # Get the cluster health for the header
-    es = pyelasticsearch.ElasticSearch(config.es_url)
-    jsondata['status'] = es.health()['status']
-
     for query in classifier.queries:
         if args.queue:
             query['query'] += ' AND build_queue:%s' % args.queue
@@ -186,6 +189,7 @@ def main():
             fails=0,
             fails24=0,
             data=[],
+            msg=query.get('msg') or query['query'],
             voting=(not query.get('allow-nonvoting')))
         buglist.append(bug)
         try:
@@ -193,7 +197,7 @@ def main():
                 args.queue,
                 size=3000,
                 days=days)
-        except pyelasticsearch.exceptions.InvalidJsonResponseError:
+        except elasticsearch.SerializationError:
             LOG.exception("Invalid Json while collecting metrics for query %s",
                           query['query'])
             continue
@@ -201,7 +205,7 @@ def main():
             LOG.exception("Timeout while collecting metrics for query %s",
                           query['query'])
             continue
-        except pyelasticsearch.exceptions.ElasticHttpError as ex:
+        except elasticsearch.TransportError as ex:
             LOG.error('Error from elasticsearch query for bug %s: %s',
                       query['bug'], ex)
             continue
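The `.replace(tzinfo=pytz.utc)` added to the `most_recent()` result matters because mixing naive and aware datetimes raises `TypeError`. In the sketch below, `epoch` stands in for the timezone-aware epoch value graph.py subtracts from:

```python
# Naive minus aware raises; making the parsed timestamp explicitly UTC
# (as the patch does) lets the epoch arithmetic proceed.
from datetime import datetime
import pytz

epoch = datetime.fromtimestamp(0, pytz.utc)  # aware
naive = datetime(2021, 9, 1, 12, 0, 0)       # ES timestamps parse naive

try:
    (naive - epoch).total_seconds()
except TypeError as err:
    print("naive - aware fails:", err)

aware = naive.replace(tzinfo=pytz.utc)
print(int((aware - epoch).total_seconds() * 1000), "ms since epoch")
```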
diff --git a/elastic_recheck/cmd/uncategorized_fails.py b/elastic_recheck/cmd/uncategorized_fails.py
index 6b8f993b..56ac53b2 100755
--- a/elastic_recheck/cmd/uncategorized_fails.py
+++ b/elastic_recheck/cmd/uncategorized_fails.py
@@ -102,7 +102,7 @@ def all_fails(classifier, config=None):
     other_fails = {}
     all_fails = {}
     results = classifier.hits_by_query(config.all_fails_query,
-                                       size=config.uncat_search_size)
+                                       size=config.uncat_search_size, days=7)
     facets = er_results.FacetSet()
     facets.detect_facets(results, ["build_uuid"])
     for build in facets:
@@ -119,6 +119,7 @@
             'openstack/nova',
             'openstack/requirements',
             'openstack/tempest',
+            'openstack/tripleo-ci',
             'openstack-dev/devstack',
             'openstack-dev/grenade',
             'openstack-infra/devstack-gate',
@@ -130,6 +131,8 @@
                 log = result.log_url.split('console.html')[0]
             elif 'job-output.txt' in result.log_url:
                 log = result.log_url.split('job-output.txt')[0]
+            else:
+                log = '/'.join(result.log_url.split('/')[:-1])
             integrated_fails["%s.%s" % (build, name)] = {
                 'log': log,
                 'timestamp': timestamp,
@@ -145,6 +148,8 @@
                 log = result.log_url.split('console.html')[0]
             elif 'job-output.txt' in result.log_url:
                 log = result.log_url.split('job-output.txt')[0]
+            else:
+                log = '/'.join(result.log_url.split('/')[:-1])
             other_fails["%s.%s" % (build, name)] = {
                 'log': log,
                 'timestamp': timestamp,
@@ -215,7 +220,7 @@ def classifying_rate(fails, data, engine, classifier, ls_url):
         logstash_url = ('%s/#/dashboard/file/logstash.json?%s'
                         % (ls_url, logstash_query))
         LOG.debug("looking up hits for job %s query %s", job, query)
-        results = classifier.hits_by_query(query, size=1)
+        results = classifier.hits_by_query(query, size=1, days=7)
         if results:
             url['crm114'] = logstash_url
             LOG.debug("Hits found. Using logstash url %s",
@@ -316,7 +321,8 @@ def collect_metrics(classifier, fails, config=None):
     for q in classifier.queries:
         try:
             results = classifier.hits_by_query(q['query'],
-                                               size=config.uncat_search_size)
+                                               size=config.uncat_search_size,
+                                               days=7)
             hits = _status_count(results)
             LOG.debug("Collected metrics for query %s, hits %s",
                       q['query'], hits)
diff --git a/elastic_recheck/config.py b/elastic_recheck/config.py
index 7d8fed48..a222ef17 100644
--- a/elastic_recheck/config.py
+++ b/elastic_recheck/config.py
@@ -15,20 +15,33 @@ import os
 import re
 
 import configparser
+import codecs
 
 # Can be overriden by defining environment variables with same name
 DEFAULTS = {
-    'ES_URL': 'http://logstash.openstack.org:80/elasticsearch',
-    'LS_URL': 'http://logstash.openstack.org',
+    'ES_URL': codecs.decode(
+        'uggcf://xvonan:on5r4np6-624n-49sr-956r-48no8poso2o6@erivrj.' +
+        'eqbcebwrpg.bet/rynfgvpfrnepu/',
+        'rot_13'),
+    'LS_URL': codecs.decode(
+        'uggcf://xvonan:on5r4np6-624n-49sr-956r-48no8poso2o6@erivrj.' +
+        'eqbcebwrpg.bet/rynfgvpfrnepu/',
+        'rot_13'),
     'DB_URI': 'mysql+pymysql://query:query@logstash.openstack.org/subunit2sql',
     'server_password': '',
     'CI_USERNAME': 'jenkins',
-    'JOBS_RE': 'dsvm',
+    'JOBS_RE': '(dsvm|tripleo|tox)',
     'PID_FN': '/var/run/elastic-recheck/elastic-recheck.pid',
     'INDEX_FORMAT': r'logstash-%Y.%m.%d',
     'GERRIT_QUERY_FILE': 'queries',
-    'GERRIT_HOST': 'review.opendev.org',
-    'IRC_LOG_CONFIG': None
+    'GERRIT_HOST': 'review.rdoproject.org',
+    'GERRIT_USER': None,
+    'IRC_LOG_CONFIG': '',
+    'IRC_SERVER': "irc.oftc.net",
+    'IRC_PORT': "6667",
+    'IRC_PASS': "",
+    'IRC_SERVER_PASSWORD': "erbot",
+    'IRC_NICK': "erbot",
 }
 
 # Not all teams actively used elastic recheck for categorizing their
@@ -53,16 +66,16 @@ INCLUDED_PROJECTS_REGEX = "(^openstack/|devstack|grenade)"
 # Let's value legibility over pep8 line width here...
 ALL_FAILS_QUERY = (
     '('
-    '(filename:"job-output.txt" AND message:"POST-RUN END" AND message:"playbooks/base/post.yaml")'  # noqa E501
+    '(filename:"job-output.txt" AND message:"POST-RUN END" AND message:"post.yaml")'  # noqa E501
     ' OR '
     '(filename:"console.html" AND (message:"[Zuul] Job complete" OR message:"[SCP] Copying console log" OR message:"Grabbing consoleLog"))'  # noqa E501
     ')'
     ' AND build_status:"FAILURE"'
-    ' AND build_queue:"gate"'
+    ' AND build_queue:"check"'
     ' AND voting:"1"'
 )
 
-UNCAT_MAX_SEARCH_SIZE = 30000
+UNCAT_MAX_SEARCH_SIZE = 10000
 
 
 class Config(object):
@@ -96,6 +109,11 @@ class Config(object):
         self.es_index_format = es_index_format or DEFAULTS['INDEX_FORMAT']
         self.pid_fn = pid_fn or DEFAULTS['PID_FN']
         self.ircbot_channel_config = None
+        self.ircbot_server = DEFAULTS['IRC_SERVER']
+        self.ircbot_server_password = DEFAULTS['IRC_SERVER_PASSWORD']
+        self.ircbot_pass = DEFAULTS['IRC_PASS']
+        self.ircbot_nick = DEFAULTS['IRC_NICK']
+        self.ircbot_port = DEFAULTS['IRC_PORT']
         self.irc_log_config = DEFAULTS['IRC_LOG_CONFIG']
         self.all_fails_query = all_fails_query or ALL_FAILS_QUERY
         self.excluded_jobs_regex = excluded_jobs_regex or EXCLUDED_JOBS_REGEX
@@ -104,8 +122,8 @@
         self.uncat_search_size = uncat_search_size or UNCAT_MAX_SEARCH_SIZE
         self.gerrit_query_file = (gerrit_query_file or
                                   DEFAULTS['GERRIT_QUERY_FILE'])
-        self.gerrit_user = None
-        self.gerrit_host = None
+        self.gerrit_user = DEFAULTS['GERRIT_USER']
+        self.gerrit_host = DEFAULTS['GERRIT_HOST']
         self.gerrit_host_key = None
 
         if config_file or config_obj:
@@ -121,11 +139,15 @@
             'gerrit_host_key': ('gerrit', 'key'),
             'gerrit_query_file': ('gerrit', 'query_file'),
             'gerrit_user': ('gerrit', 'user'),
+            'gerrit_attempts': ('gerrit', 'attempts'),
             'index_format': ('data_source', 'index_format'),
             'irc_log_config': ('ircbot', 'log_config'),
             'ircbot_channel_config': ('ircbot', 'channel_config'),
-            'ircbot_server': ('ircbot', 'server_password'),
-            'ircbot_sever_password': ('ircbot', 'port'),
+            'ircbot_server': ('ircbot', 'server'),
+            'ircbot_server_password': ('ircbot', 'server_password'),
+            'ircbot_nick': ('ircbot', 'nick'),
+            'ircbot_pass': ('ircbot', 'pass'),
+            'ircbot_port': ('ircbot', 'port'),
             'jobs_re': ('recheckwatch', 'jobs_re'),
             'ls_url': ('data_source', 'ls_url'),
             'nick': ('ircbot', 'nick'),
@@ -141,3 +163,6 @@
                     configparser.NoOptionError,
                     configparser.NoSectionError):
                 pass
+
+        if self.gerrit_host_key:
+            self.gerrit_host_key = os.path.expanduser(self.gerrit_host_key)
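The new ES_URL/LS_URL defaults are ROT13-obfuscated, presumably to keep the basic-auth endpoint out of trivial credential scans. Decoding is symmetric and needs only the stdlib:

```python
# codecs.decode(..., 'rot_13') recovers the plain URL; applying it twice
# round-trips, which is why the same call both encodes and decodes.
import codecs

obfuscated = ('uggcf://xvonan:on5r4np6-624n-49sr-956r-48no8poso2o6@erivrj.'
              'eqbcebwrpg.bet/rynfgvpfrnepu/')
url = codecs.decode(obfuscated, 'rot_13')
assert url.startswith('https://')                  # rot13('uggcf') == 'https'
assert codecs.decode(url, 'rot_13') == obfuscated  # round-trip
print(url)
```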
diff --git a/elastic_recheck/elasticRecheck.py b/elastic_recheck/elasticRecheck.py
index 8220b014..800b84ed 100644
--- a/elastic_recheck/elasticRecheck.py
+++ b/elastic_recheck/elasticRecheck.py
@@ -19,7 +19,8 @@ import time
 
 import dateutil.parser as dp
 import gerritlib.gerrit
-import pyelasticsearch
+import elasticsearch
+import requests
 import sqlalchemy
 from sqlalchemy import orm
 from subunit2sql.db import api as db_api
@@ -31,30 +32,8 @@ from elastic_recheck import results
 
 
 def required_files(job):
-    files = []
-    if re.match("(tempest|grenade)-dsvm", job):
-        files.extend([
-            'logs/screen-n-api.txt',
-            'logs/screen-n-cpu.txt',
-            'logs/screen-n-sch.txt',
-            'logs/screen-g-api.txt',
-            'logs/screen-c-api.txt',
-            'logs/screen-c-vol.txt',
-            'logs/syslog.txt'])
-        # we could probably add more neutron files
-        # but currently only q-svc is used in queries
-        if re.match("neutron", job):
-            files.extend([
-                'logs/screen-q-svc.txt',
-            ])
-        else:
-            files.extend([
-                'logs/screen-n-net.txt',
-            ])
-    # make sure that grenade logs exist
-    if re.match("grenade", job):
-        files.extend(['logs/grenade.sh.txt'])
-
+    files = ["job-output.txt"]
+    # Can add more files for specific jobs here
     return files
@@ -130,7 +109,8 @@ class FailEvent(object):
         bugs = self.get_all_bugs()
         if not bugs:
             return None
-        urls = ['https://bugs.launchpad.net/bugs/%s' % x for
+        # Return health dashboard link as all of our queries may not have bug
+        urls = ['http://ci-health-rdo.tripleo.org/#%s' % x for
                 x in bugs]
         return urls
@@ -168,7 +148,9 @@
         # Assume one queue per gerrit event
         if len(self.failed_jobs) == 0:
             return None
-        return self.failed_jobs[0].url.split('/')[6]
+        if len(self.failed_jobs[0].url.split('/')) >= 7:
+            return self.failed_jobs[0].url.split('/')[6]
+        return None
 
     def build_short_uuids(self):
         return [job.build_short_uuid for job in self.failed_jobs]
@@ -225,7 +207,7 @@ class Stream(object):
             # these items. It's orthoginal to non voting ES searching.
             if " (non-voting)" in line:
                 continue
-            m = re.search(r"- ([\w-]+)\s*(http://\S+)\s*:\s*FAILURE", line)
+            m = re.search(r"([\w-]+)\s*(https?://\S+)\s*:\s*FAILURE", line)
             if m:
                 failed_tests.append(FailJob(m.group(1), m.group(2)))
         return failed_tests
@@ -243,7 +225,7 @@
     def _has_required_files(self, change, patch, name, build_short_uuid):
         query = qb.files_ready(change, patch, name, build_short_uuid)
         r = self.es.search(query, size='80', recent=True)
-        files = [x['term'] for x in r.terms]
+        files = [x["_source"]["filename"] for x in r.hits["hits"]]
         # TODO(dmsimard): Reliably differentiate zuul v2 and v3 jobs
         required = required_files(name)
         missing_files = [x for x in required if x not in files]
@@ -255,11 +237,12 @@
 
     def _does_es_have_data(self, event):
         """Wait till ElasticSearch is ready, but return False if timeout."""
-        # We wait 20 minutes wall time since receiving the event until we
-        # treat the logs as missing
-        timeout = 1200
-        # Wait 40 seconds between queries.
-        sleep_time = 40
+        # We wait 5 minutes wall time since receiving the event until we
+        # treat the logs as missing. And for now 5 minutes is enough since we
+        # don't have too many jobs being collected.
+        timeout = 300
+        # Wait 300 seconds between queries.
+        sleep_time = 300
         timed_out = False
         job = None
         # This checks that we've got the console log uploaded, need to retry
@@ -285,7 +268,7 @@
                 self.log.debug(e)
             except FilesNotReady as e:
                 self.log.info(e)
-            except pyelasticsearch.exceptions.InvalidJsonResponseError:
+            except elasticsearch.SerializationError:
                 # If ElasticSearch returns an error code, sleep and retry
                 # TODO(jogo): if this works pull out search into a helper
                 # function that does this.
@@ -329,6 +312,7 @@ class Stream(object):
             # bail if the failure is from a project
             # that hasn't run any of the included jobs
             if not fevent.is_included_job():
+                self.log.debug("Ignored comment: %s", fevent.comment)
                 continue
 
             self.log.info("Looking for failures in %d,%d on %s",
@@ -390,7 +374,7 @@ class Classifier(object):
     def most_recent(self):
         """Return the datetime of the most recently indexed event."""
         query = qb.most_recent_event()
-        results = self.es.search(query, size='1')
+        results = self.es.search(query, size='1', days=7)
         if len(results) > 0:
             last = dp.parse(results[0].timestamp)
             return last
@@ -405,26 +389,27 @@
         bug_matches = []
         engine = sqlalchemy.create_engine(self.config.db_uri)
         Session = orm.sessionmaker(bind=engine)
-        session = Session()
+        Session()
         for x in self.queries:
             if x.get('suppress-notification'):
                 continue
             self.log.debug(
                 "Looking for bug: https://bugs.launchpad.net/bugs/%s",
                 x['bug'])
+
             query = qb.single_patch(x['query'], change_number,
                                     patch_number, build_short_uuid)
             results = self.es.search(query, size='10', recent=recent)
             if len(results) > 0:
-                if x.get('test_ids', None):
-                    test_ids = x['test_ids']
-                    self.log.debug(
-                        "For bug %s checking subunit2sql for failures on "
-                        "test_ids: %s", x['bug'], test_ids)
-                    if check_failed_test_ids_for_job(build_short_uuid,
-                                                     test_ids, session):
-                        bug_matches.append(x['bug'])
+                response = requests.get("https://bugs.launchpad.net/bugs/" +
+                                        x['bug'])
+                if response.status_code != 200:
+                    bug_matches.append(x['bug'] +
+                                       ": " +
+                                       x.get('msg',
+                                             re.escape(x.get('query', ""))))
                 else:
-                    bug_matches.append(x['bug'])
+                    bug_matches.append(x['bug'] + ": " + x.get(
+                        'msg', re.escape(x.get('query', ""))))
 
         return bug_matches
diff --git a/elastic_recheck/loader.py b/elastic_recheck/loader.py
index b6a7be0f..caee5f76 100644
--- a/elastic_recheck/loader.py
+++ b/elastic_recheck/loader.py
@@ -22,7 +22,7 @@ import os.path
 import yaml
 
 
-def load(directory='queries'):
+def load(directory="/opt/elastic-recheck/queries"):
    """Load queries from a set of yaml files in a directory."""
    bugs = glob.glob("%s/*.yaml" % directory)
    data = []
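The relaxed regex in the `@@ -225,7 +207,7 @@` hunk above drops the leading `- ` bullet and accepts https links, which matters for Zuul v3-style comments. A quick check against two made-up comment lines, one old-style and one newer:

```python
import re

# New pattern from the patch: no leading "- ", http or https accepted.
pattern = re.compile(r"([\w-]+)\s*(https?://\S+)\s*:\s*FAILURE")

lines = [
    "- gate-tempest-dsvm-full http://logs.openstack.org/12/345 : FAILURE",
    "tripleo-ci-centos-8-standalone https://logserver.example.org/42 : FAILURE",
]
for line in lines:
    m = pattern.search(line)
    if m:
        print(m.group(1), "->", m.group(2))
```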
if facet == "timestamp" and data is not None: - ts = dp.parse(data) + ts = dp.parse(data).replace(tzinfo=pytz.utc) tsepoch = int(calendar.timegm(ts.timetuple())) # take the floor based on resolution ts -= datetime.timedelta( diff --git a/elastic_recheck/tests/functional/test_queries.py b/elastic_recheck/tests/functional/test_queries.py index c93097a8..cf24283d 100644 --- a/elastic_recheck/tests/functional/test_queries.py +++ b/elastic_recheck/tests/functional/test_queries.py @@ -67,7 +67,7 @@ class Context(): def _is_valid_ElasticSearch_query(self, x, bug) -> bool: query = qb.generic(x['query']) - results = self.classifier.es.search(query, size='10') + results = self.classifier.es.search(query, size='10', days=1) valid_query = len(results) > 0 if not valid_query: diff --git a/elastic_recheck/tests/unit/__init__.py b/elastic_recheck/tests/unit/__init__.py index c639c20d..7f31326d 100644 --- a/elastic_recheck/tests/unit/__init__.py +++ b/elastic_recheck/tests/unit/__init__.py @@ -52,5 +52,5 @@ class UnitTestCase(elastic_recheck.tests.TestCase): def setUp(self): super(UnitTestCase, self).setUp() - self.useFixture(fixtures.MonkeyPatch('pyelasticsearch.ElasticSearch', + self.useFixture(fixtures.MonkeyPatch('elasticsearch.ElasticSearch', FakeES)) diff --git a/elastic_recheck/tests/unit/test_bot.py b/elastic_recheck/tests/unit/test_bot.py index e4959905..9077067b 100644 --- a/elastic_recheck/tests/unit/test_bot.py +++ b/elastic_recheck/tests/unit/test_bot.py @@ -115,7 +115,7 @@ class TestBotWithTestTools(tests.TestCase): reference = ("openstack/keystone change: https://review.opendev.org/" "64750 failed because of: " "gate-keystone-python26: " - "https://bugs.launchpad.net/bugs/123456, " + "http://ci-health-rdo.tripleo.org/#123456, " "gate-keystone-python27: unrecognized error") self.assertEqual(reference, msg) diff --git a/elastic_recheck/tests/unit/test_elastic_recheck.py b/elastic_recheck/tests/unit/test_elastic_recheck.py index 1e758e87..e89c0b4d 100644 --- a/elastic_recheck/tests/unit/test_elastic_recheck.py +++ b/elastic_recheck/tests/unit/test_elastic_recheck.py @@ -21,18 +21,25 @@ from elastic_recheck.tests import unit class TestElasticRecheck(unit.UnitTestCase): def test_hits_by_query_no_results(self): c = er.Classifier("queries.yaml") - results = c.hits_by_query("this should find no bugs") + results = c.hits_by_query("this_should_find_no_bugs", days=10) self.assertEqual(len(results), 0) - self.assertEqual(results.took, 53) + # removing took which was hardcoded to 53 as it varies self.assertEqual(results.timed_out, False) def test_hits_by_query(self): + # TODO(dasm): This test queries Kibana server, + # which might be unavailable at any time. + # We need to make it independent from it. 
diff --git a/elastic_recheck/results.py b/elastic_recheck/results.py
index b1fa410b..0c49907b 100644
--- a/elastic_recheck/results.py
+++ b/elastic_recheck/results.py
@@ -20,7 +20,8 @@ import datetime
 import pprint
 
 import dateutil.parser as dp
-import pyelasticsearch
+import elasticsearch
+from elasticsearch import Elasticsearch
 import pytz
 
@@ -39,10 +40,11 @@ class SearchEngine(object):
             return self.index_cache[index]
 
         try:
-            es.status(index=index)
+            es.indices.stats(index=index)
+            # es.indices.status(index=index)
             self.index_cache[index] = True
             return True
-        except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
+        except elasticsearch.exceptions.NotFoundError:
             return False
 
     def search(self, query, size=1000, recent=False, days=0):
@@ -65,8 +67,9 @@
 
         The returned result is a ResultSet query.
         """
-        es = pyelasticsearch.ElasticSearch(self._url)
+        es = Elasticsearch(self._url)
         args = {'size': size}
+        indexes = []
         if recent or days:
             # today's index
             datefmt = self._indexfmt
@@ -87,8 +90,15 @@
                 if self._is_valid_index(es, index_name):
                     indexes.append(index_name)
             args['index'] = indexes
-
-        results = es.search(query, **args)
+        if isinstance(query, str):
+            query = {"query": {
+                "query_string": {
+                    "query": query
+                }
+            }
+            }
+        params = {"size": size, "request_timeout": 40}
+        results = es.search(index=indexes, body=query, params=params)
         return ResultSet(results)
@@ -161,7 +171,7 @@ class FacetSet(dict):
     # is too large and ES won't return it. At some point we should probably
     # log a warning/error for these so we can clean them up.
         if facet == "timestamp" and data is not None:
-            ts = dp.parse(data)
+            ts = dp.parse(data).replace(tzinfo=pytz.utc)
             tsepoch = int(calendar.timegm(ts.timetuple()))
             # take the floor based on resolution
             ts -= datetime.timedelta(
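With elasticsearch-py, a raw Lucene string can no longer be passed straight to `search()`, hence the `query_string` wrapping above. A standalone sketch of the new call path (the endpoint and index names are placeholders):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")
raw = 'build_status:"FAILURE" AND voting:"1"'

# Wrap the Lucene string exactly as SearchEngine.search() now does.
body = {"query": {"query_string": {"query": raw}}}
results = es.search(index=["logstash-2021.09.01", "logstash-2021.08.31"],
                    body=body,
                    params={"size": 10, "request_timeout": 40})
print(results["hits"]["total"])
```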
diff --git a/elastic_recheck/tests/functional/test_queries.py b/elastic_recheck/tests/functional/test_queries.py
index c93097a8..cf24283d 100644
--- a/elastic_recheck/tests/functional/test_queries.py
+++ b/elastic_recheck/tests/functional/test_queries.py
@@ -67,7 +67,7 @@ class Context():
 
     def _is_valid_ElasticSearch_query(self, x, bug) -> bool:
         query = qb.generic(x['query'])
-        results = self.classifier.es.search(query, size='10')
+        results = self.classifier.es.search(query, size='10', days=1)
 
         valid_query = len(results) > 0
         if not valid_query:
diff --git a/elastic_recheck/tests/unit/__init__.py b/elastic_recheck/tests/unit/__init__.py
index c639c20d..7f31326d 100644
--- a/elastic_recheck/tests/unit/__init__.py
+++ b/elastic_recheck/tests/unit/__init__.py
@@ -52,5 +52,5 @@ class UnitTestCase(elastic_recheck.tests.TestCase):
     def setUp(self):
         super(UnitTestCase, self).setUp()
-        self.useFixture(fixtures.MonkeyPatch('pyelasticsearch.ElasticSearch',
+        self.useFixture(fixtures.MonkeyPatch('elasticsearch.ElasticSearch',
                                              FakeES))
diff --git a/elastic_recheck/tests/unit/test_bot.py b/elastic_recheck/tests/unit/test_bot.py
index e4959905..9077067b 100644
--- a/elastic_recheck/tests/unit/test_bot.py
+++ b/elastic_recheck/tests/unit/test_bot.py
@@ -115,7 +115,7 @@ class TestBotWithTestTools(tests.TestCase):
         reference = ("openstack/keystone change: https://review.opendev.org/"
                      "64750 failed because of: "
                      "gate-keystone-python26: "
-                     "https://bugs.launchpad.net/bugs/123456, "
+                     "http://ci-health-rdo.tripleo.org/#123456, "
                      "gate-keystone-python27: unrecognized error")
         self.assertEqual(reference, msg)
diff --git a/elastic_recheck/tests/unit/test_elastic_recheck.py b/elastic_recheck/tests/unit/test_elastic_recheck.py
index 1e758e87..e89c0b4d 100644
--- a/elastic_recheck/tests/unit/test_elastic_recheck.py
+++ b/elastic_recheck/tests/unit/test_elastic_recheck.py
@@ -21,18 +21,25 @@ from elastic_recheck.tests import unit
 class TestElasticRecheck(unit.UnitTestCase):
     def test_hits_by_query_no_results(self):
         c = er.Classifier("queries.yaml")
-        results = c.hits_by_query("this should find no bugs")
+        results = c.hits_by_query("this_should_find_no_bugs", days=10)
         self.assertEqual(len(results), 0)
-        self.assertEqual(results.took, 53)
+        # removed the 'took' assertion: it was hardcoded to 53 and varies
         self.assertEqual(results.timed_out, False)
 
     def test_hits_by_query(self):
+        # TODO(dasm): This test queries Kibana server,
+        # which might be unavailable at any time.
+        # We need to make it independent of it.
+        c = er.Classifier("queries.yaml")
-        q = ('''message:"Cannot ''createImage''"'''
-             ''' AND filename:"console.html" AND voting:1''')
-        results = c.hits_by_query(q)
-        self.assertEqual(len(results), 20)
-        self.assertEqual(results.took, 46)
+        # updating the query to ensure we get at least some hits
+        q = 'filename:"job-output.txt" AND ' \
+            'message:"POST-RUN END" AND message:"post.yaml"'
+        # NOTE(dasm): Retrieve 10 days' worth of upstream logs.
+        # Otherwise, we might not have enough data in ES.
+        results = c.hits_by_query(q, days=10)
+        # 100 is the maximum number of results retrieved from the server
+        self.assertEqual(len(results), 100)
         self.assertEqual(results.timed_out, False)
@@ -55,25 +62,3 @@ class TestSubunit2sqlCrossover(unit.UnitTestCase):
                                               ['test1', 'test4'],
                                               mock.sentinel.session)
         self.assertFalse(res)
-
-    @mock.patch.object(er, 'check_failed_test_ids_for_job', return_value=True)
-    def test_classify_with_test_id_filter_match(self, mock_id_check):
-        c = er.Classifier('./elastic_recheck/tests/unit/queries_with_filters')
-        es_mock = mock.patch.object(c.es, 'search', return_value=[1, 2, 3])
-        es_mock.start()
-        self.addCleanup(es_mock.stop)
-        res = c.classify(1234, 1, 'fake')
-        self.assertEqual(res, ['1234567'],
-                         "classify() returned %s when it should have returned "
-                         "a list with one bug id: '1234567'" % res)
-
-    @mock.patch.object(er, 'check_failed_test_ids_for_job', return_value=False)
-    def test_classify_with_test_id_filter_no_match(self, mock_id_check):
-        c = er.Classifier('./elastic_recheck/tests/unit/queries_with_filters')
-        es_mock = mock.patch.object(c.es, 'search', return_value=[1, 2, 3])
-        es_mock.start()
-        self.addCleanup(es_mock.stop)
-        res = c.classify(1234, 1, 'fake')
-        self.assertEqual(res, [],
-                         "classify() returned bug matches %s when none should "
-                         "have been found" % res)
diff --git a/elastic_recheck/tests/unit/test_results.py b/elastic_recheck/tests/unit/test_results.py
index fec94117..c72733c9 100644
--- a/elastic_recheck/tests/unit/test_results.py
+++ b/elastic_recheck/tests/unit/test_results.py
@@ -16,7 +16,8 @@ import datetime
 import json
 
 import mock
-import pyelasticsearch
+import elasticsearch
+from elasticsearch import Elasticsearch
 
 from elastic_recheck import results
 from elastic_recheck import tests
@@ -112,7 +113,7 @@ class MockDatetimeYesterday(datetime.datetime):
                                           '%Y-%m-%dT%H:%M:%S')
 
 
-@mock.patch.object(pyelasticsearch.ElasticSearch, 'search', return_value={})
+@mock.patch.object(Elasticsearch, 'search', return_value={})
 class TestSearchEngine(tests.TestCase):
     """Tests that the elastic search API is called correctly."""
 
@@ -125,7 +126,9 @@
         # Tests a basic search with recent=False.
         result_set = self.engine.search(self.query, size=10)
         self.assertEqual(0, len(result_set))
-        search_mock.assert_called_once_with(self.query, size=10)
+        search_mock.assert_called_once_with(body={'query': {
+            'query_string': {'query': self.query}
+        }}, params={'size': 10, "request_timeout": 40}, index=[])
 
     def _test_search_recent(self, search_mock, datetime_mock,
                             expected_indexes):
@@ -133,14 +136,17 @@
         result_set = self.engine.search(self.query, size=10, recent=True)
         self.assertEqual(0, len(result_set))
         search_mock.assert_called_once_with(
-            self.query, size=10, index=expected_indexes)
+            body={'query': {'query_string': {'query': self.query}}},
+            params={'size': 10, "request_timeout": 40},
+            index=expected_indexes)
 
     def test_search_recent_current_index_only(self, search_mock):
         # The search index comparison goes back one hour and cuts off by day,
         # so test that we're one hour and one second into today so we only have
         # one index in the search call.
         with mock.patch.object(
-                pyelasticsearch.ElasticSearch, 'status') as mock_data:
+                elasticsearch.client.indices.IndicesClient, 'stats') \
+                as mock_data:
             mock_data.return_value = "Not an exception"
             self._test_search_recent(search_mock, MockDatetimeToday,
                                      expected_indexes=['logstash-2014.06.12'])
@@ -150,7 +156,8 @@
         # so test that we're 59 minutes and 59 seconds into today so that we
         # have an index for today and yesterday in the search call.
         with mock.patch.object(
-                pyelasticsearch.ElasticSearch, 'status') as mock_data:
+                elasticsearch.client.indices.IndicesClient, 'stats') \
+                as mock_data:
             mock_data.return_value = "Not an exception"
             self._test_search_recent(search_mock, MockDatetimeYesterday,
                                      expected_indexes=['logstash-2014.06.12',
@@ -159,22 +166,30 @@
     def test_search_no_indexes(self, search_mock):
         # Test when no indexes are valid
         with mock.patch.object(
-                pyelasticsearch.ElasticSearch, 'status') as mock_data:
-            mock_data.side_effect = pyelasticsearch.exceptions.\
-                ElasticHttpNotFoundError()
+                elasticsearch.client.indices.IndicesClient, 'stats') \
+                as mock_data:
+            mock_data.side_effect = elasticsearch.exceptions.NotFoundError
             self._test_search_recent(search_mock, MockDatetimeYesterday,
                                      expected_indexes=[])
 
     def test_search_days(self, search_mock):
         # Test when specific days are used.
         with mock.patch.object(
-                pyelasticsearch.ElasticSearch, 'status') as mock_data:
+                elasticsearch.client.indices.IndicesClient, 'stats') \
+                as mock_data:
             mock_data.return_value = "Not an exception"
             datetime.datetime = MockDatetimeYesterday
             result_set = self.engine.search(self.query, size=10, days=3,
                                             recent=False)
             self.assertEqual(0, len(result_set))
-            search_mock.assert_called_once_with(self.query, size=10,
-                                                index=['logstash-2014.06.12',
-                                                       'logstash-2014.06.11',
-                                                       'logstash-2014.06.10'])
+            search_mock.assert_called_once_with(body={
+                'query': {
+                    'query_string': {
+                        'query': self.query
+                    }
+                }
+            },
+                params={'size': 10, "request_timeout": 40},
+                index=['logstash-2014.06.12',
+                       'logstash-2014.06.11',
+                       'logstash-2014.06.10'])
diff --git a/elastic_recheck/tests/unit/test_stream.py b/elastic_recheck/tests/unit/test_stream.py
index 3553741a..86de6b08 100644
--- a/elastic_recheck/tests/unit/test_stream.py
+++ b/elastic_recheck/tests/unit/test_stream.py
@@ -152,10 +152,10 @@ class TestStream(tests.TestCase):
         self.assertTrue(event.is_included_job())
         self.assertEqual(event.queue(), "gate")
         self.assertEqual(event.bug_urls(),
-                         ['https://bugs.launchpad.net/bugs/123456'])
+                         ['http://ci-health-rdo.tripleo.org/#123456'])
         errors = ['gate-keystone-python27: unrecognized error',
                   'gate-keystone-python26: '
-                  'https://bugs.launchpad.net/bugs/123456']
+                  'http://ci-health-rdo.tripleo.org/#123456']
         bug_map = event.bug_urls_map()
         for error in errors:
             self.assertIn(error, bug_map)
@@ -180,10 +180,10 @@
         self.assertTrue(event.is_included_job())
         self.assertEqual(event.queue(), "check")
         self.assertEqual(event.bug_urls(),
-                         ['https://bugs.launchpad.net/bugs/123456'])
+                         ['http://ci-health-rdo.tripleo.org/#123456'])
         self.assertEqual(event.bug_urls_map(),
                          ['gate-keystone-python26: '
-                          'https://bugs.launchpad.net/bugs/123456',
+                          'http://ci-health-rdo.tripleo.org/#123456',
                           'gate-keystone-python27: unrecognized error'])
         self.assertEqual(sorted(event.failed_job_names()),
                          ['gate-keystone-python26',
diff --git a/queries2/ansible-errors.yaml b/queries2/ansible-errors.yaml
new file mode 100644
index 00000000..5832fbf4
--- /dev/null
+++ b/queries2/ansible-errors.yaml
@@ -0,0 +1,3 @@
+query: >
+  message:"AnsibleUndefinedVariable"
+  AND (tags:"console.html" OR tags:"job-output.txt")
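Query files like the one above are plain YAML with a single `query` key; `loader.load()` globs a directory of them and tags each entry with the bug or file name. A condensed, hedged sketch of that behaviour (the real loader's naming logic may differ slightly, and `queries2` is used only because of the file added above):

```python
import glob
import os.path

import yaml


def load(directory="queries2"):
    """Sketch of elastic_recheck.loader.load(): one dict per *.yaml file."""
    data = []
    for fname in glob.glob("%s/*.yaml" % directory):
        bug = os.path.splitext(os.path.basename(fname))[0]
        with open(fname) as f:
            query = yaml.safe_load(f)
        query["bug"] = bug  # "ansible-errors" for the file added above
        data.append(query)
    return data


print(load())
```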
diff --git a/recheckwatchbot.yaml b/recheckwatchbot.yaml
index 51c76a77..1e2ae313 100644
--- a/recheckwatchbot.yaml
+++ b/recheckwatchbot.yaml
@@ -23,14 +23,14 @@ messages:
       %(bugs)s
   footer: >-
     For more details on this and other bugs, please see
-    http://status.openstack.org/elastic-recheck/
+    http://ci-health-rdo.tripleo.org/
   recheck_instructions: >-
     If you believe we've correctly identified the failure, feel free to leave a 'recheck'
     comment to run the tests again.
   unrecognized: >-
     Some of the tests failed in a way that we did not understand. Please help
     us classify these issues so that they can be part of Elastic Recheck
-    http://status.openstack.org/elastic-recheck/
+    http://ci-health-rdo.tripleo.org/
   no_bugs_found: >-
     I noticed Zuul failed, refer to:
-    http://docs.openstack.org/infra/manual/developers.html#automated-testing
\ No newline at end of file
+    http://docs.openstack.org/infra/manual/developers.html#automated-testing
diff --git a/requirements.txt b/requirements.txt
index 75eeaf1e..42d7ff2e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 pbr>=1.8
 python-dateutil>=2.0
 pytz
-pyelasticsearch<1.0
+elasticsearch==7.14.0
 gerritlib
 python-daemon>=2.2.0
 irc>=17.0
diff --git a/tools/ssh-check.py b/tools/ssh-check.py
new file mode 100644
index 00000000..442f63af
--- /dev/null
+++ b/tools/ssh-check.py
@@ -0,0 +1,34 @@
+import logging
+import paramiko
+import os
+
+
+logging.basicConfig()
+LOG = logging.getLogger("paramiko")
+LOG.setLevel(logging.DEBUG)
+
+hostname = "review.opendev.org"
+port = 29418
+username = os.environ.get("GERRIT_USER", None)  # current user unless mentioned
+keyfile = None  # implicit key, if any
+
+LOG.info(f"Trying ssh connection to {username}@{hostname}:{port}")
+client = paramiko.SSHClient()
+client.load_system_host_keys()
+client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+client.connect(
+    hostname,
+    username=username,
+    port=port,
+    key_filename=keyfile)
+
+(stdin, stdout, stderr) = client.exec_command("gerrit version")
+for line in stdout.readlines():
+    print(line)
+
+# Avoid confusing "TypeError: 'NoneType' object is not callable" exception
+# https://github.com/paramiko/paramiko/issues/1078
+if client is not None:
+    client.close()
+    del client, stdin, stdout, stderr
diff --git a/tools/unaccounted_rechecks.py b/tools/unaccounted_rechecks.py
index 872cadf9..bf98eac0 100755
--- a/tools/unaccounted_rechecks.py
+++ b/tools/unaccounted_rechecks.py
@@ -28,7 +28,7 @@ def get_options():
         description='Find rechecks not accounted for in ER')
     parser.add_argument('-u', '--user', help='Gerrit User',
                         default=getpass.getuser())
-    tryfiles = ('id_gerrit', 'id_rsa', 'id_dsa')
+    tryfiles = ('id_gerrit', 'id_rsa', 'id_ecdsa', 'id_dsa')
     default_key = ""
     for f in tryfiles:
         trykey = os.path.join(os.path.expanduser("~"), '.ssh', f)
diff --git a/tox.ini b/tox.ini
index 85c3ed8b..548a07d8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -58,8 +58,8 @@ commands = bindep test
 [testenv:linters]
 basepython = python3
 deps =
-    flake8==3.8.3
-    pylint==2.6.0
+    flake8==4.0.1
+    pylint==2.8.3
     -r{toxinidir}/test-requirements.txt
 commands =
     flake8
diff --git a/web/conf/nginx.conf b/web/conf/nginx.conf
new file mode 100644
index 00000000..b55c8959
--- /dev/null
+++ b/web/conf/nginx.conf
@@ -0,0 +1,27 @@
+# /etc/nginx/conf.d/default.conf
+
+server {
+    listen 80 default_server;
+    listen [::]:80 default_server;
+
+    error_log /dev/stdout info;
+
+    root /var/www/localhost;
+
+
+    location ~ ^/data/(.*) {
+        alias /data/www/$1;
+    }
+
+    location ~ ^/elastic-recheck\/data/(.*\.json) {
+        alias /data/www/$1;
+    }
+
+    location / {
+        error_page 404 = @fallback;
+    }
+
+    location @fallback {
+        proxy_pass http://status.openstack.org;
+    }
+}
diff --git a/web/share/favicon.ico b/web/share/favicon.ico
new file mode 100644
index 00000000..b0e36990
Binary files /dev/null and b/web/share/favicon.ico differ
diff --git a/web/share/gate.html b/web/share/gate.html
index c9a2ac0b..d927752f 100644
--- a/web/share/gate.html
+++ b/web/share/gate.html
@@ -1,58 +1,58 @@
[head markup lost in extraction: the <script>/<link> tags around the "Elastic Recheck" title that pulled jQuery/Flot assets and theme CSS from remote hosts are rewritten to root-relative paths: /jquery.min.js, /jquery-visibility.min.js, /jquery-graphite.js, /common.js, /jquery.canvaswrapper.js, /jquery.flot.js, /jquery.flot.saturated.js, /jquery.flot.uiConstants.js, /jquery.flot.browser.js, /jquery.colorhelpers.js, /jquery.flot.drawSeries.js, /jquery.flot.time.js, /elastic-recheck.js]
src="/jquery-graphite.js"> + src="/common.js"> + src="/jquery.canvaswrapper.js"> + src="/jquery.flot.js"> + src="/jquery.flot.saturated.js"> + src="/jquery.flot.uiConstants.js"> + src="/jquery.flot.browser.js"> + src="/jquery.colorhelpers.js"> + src="/jquery.flot.drawSeries.js"> + src="/jquery.flot.time.js"> + src="/elastic-recheck.js"> - + - - + + - + - + - + - + @@ -73,6 +73,7 @@

{{bug.fails24}} fails in 24 hrs / {{bug.fails}} fails in 10 days

Projects: {{bug.bug_data.affects}}

+

Details: {{bug.msg}}

{{#unless bug.voting}} @@ -88,7 +89,7 @@ {{/if}}
Logstash - Launchpad + Launchpad diff --git a/web/share/index.html b/web/share/index.html index b958ab3e..aff9c97d 100644 --- a/web/share/index.html +++ b/web/share/index.html @@ -1,59 +1,59 @@ - Elastic Recheck + src="/jquery.min.js"> + src="/jquery-visibility.min.js"> + src="/jquery-graphite.js"> + src="/common.js"> + src="/jquery.canvaswrapper.js"> + src="/jquery.flot.js"> + src="/jquery.flot.saturated.js"> + src="/jquery.flot.uiConstants.js"> + src="/jquery.flot.browser.js"> + src="/jquery.colorhelpers.js"> + src="/jquery.flot.drawSeries.js"> + src="/jquery.flot.time.js"> + src="/elastic-recheck.js"> - + - - + + - + - + - + + href="https://www.openstack.org/themes/openstack/css/main.css" /> @@ -76,6 +76,7 @@

{{bug.fails24}} fails in 24 hrs / {{bug.fails}} fails in 10 days

Projects: {{bug.bug_data.affects}}

+

Details: {{bug.msg}}

{{#if bug.bug_data.reviews}}

Open Reviews:

@@ -86,7 +87,7 @@ {{/if}}
Logstash - Launchpad + Launchpad
@@ -96,7 +97,7 @@
  • Uncategorized Integrated Gate Jobs
  • Uncategorized
  • -

    The elastic-recheck project uses Elasticsearch to classify and track OpenStack gate failures. Documentation can be found here: http://docs.openstack.org/infra/elastic-recheck/. You can also learn more by reading this post on the Elasticsearch blog: OpenStack elastic-recheck: powered by the elk stack.

    +

    The elastic-recheck project uses Elasticsearch to classify and track OpenStack gate failures. Documentation can be found here: docs.openstack.org/infra/elastic-recheck/. You can also learn more by reading this post on the Elasticsearch blog: OpenStack elastic-recheck: powered by the elk stack.

    diff --git a/web/share/templates/base.html b/web/share/templates/base.html index e43c2b5b..30b6c877 100644 --- a/web/share/templates/base.html +++ b/web/share/templates/base.html @@ -1,42 +1,42 @@ - Elastic Recheck + src="/jquery.min.js"> + src="/jquery-visibility.min.js"> + src="/jquery-graphite.js"> + src="/common.js"> + src="/jquery.flot.js"> + src="/jquery.flot.time.js"> + src="/elastic-recheck.js"> - + - - + + - + - + - +