Merge pull request #1211 from GSA/main
7/24/2024 Production Deploy
stvnrlly authored Jul 25, 2024
2 parents 72dc0e3 + 9cdd8c3 commit 2d117ea
Showing 13 changed files with 178 additions and 44 deletions.
1 change: 1 addition & 0 deletions app/__init__.py
@@ -252,6 +252,7 @@ def register_blueprint(application):


def init_app(app):

    @app.before_request
    def record_request_details():
        g.start = monotonic()
71 changes: 71 additions & 0 deletions app/aws/s3.py
@@ -19,6 +19,77 @@
JOBS_CACHE_MISSES = "JOBS_CACHE_MISSES"


def list_s3_objects():
    bucket_name = current_app.config["CSV_UPLOAD_BUCKET"]["bucket"]
    access_key = current_app.config["CSV_UPLOAD_BUCKET"]["access_key_id"]
    secret_key = current_app.config["CSV_UPLOAD_BUCKET"]["secret_access_key"]
    region = current_app.config["CSV_UPLOAD_BUCKET"]["region"]
    session = Session(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=region,
    )
    s3 = session.client("s3")

    try:
        response = s3.list_objects_v2(Bucket=bucket_name)
        while True:
            for obj in response.get("Contents", []):
                yield obj["Key"]
            if "NextContinuationToken" in response:
                response = s3.list_objects_v2(
                    Bucket=bucket_name,
                    ContinuationToken=response["NextContinuationToken"],
                )
            else:
                break
    except Exception as e:
        current_app.logger.error(
            f"An error occurred while regenerating cache #notify-admin-1200 {e}"
        )


def get_s3_files():
    current_app.logger.info("Regenerate job cache #notify-admin-1200")
    bucket_name = current_app.config["CSV_UPLOAD_BUCKET"]["bucket"]
    access_key = current_app.config["CSV_UPLOAD_BUCKET"]["access_key_id"]
    secret_key = current_app.config["CSV_UPLOAD_BUCKET"]["secret_access_key"]
    region = current_app.config["CSV_UPLOAD_BUCKET"]["region"]
    session = Session(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=region,
    )
    objects = list_s3_objects()

    s3res = session.resource("s3", config=AWS_CLIENT_CONFIG)
    current_app.logger.info(
        f"JOBS cache length before regen: {len(JOBS)} #notify-admin-1200"
    )
    for object in objects:
        # We put our csv files in the format "service-{service_id}-notify/{job_id}"
        try:
            object_arr = object.split("/")
            job_id = object_arr[1]  # get the job_id
            job_id = job_id.replace(".csv", "")  # we just want the job_id
            if JOBS.get(job_id) is None:
                object = (
                    s3res.Object(bucket_name, object)
                    .get()["Body"]
                    .read()
                    .decode("utf-8")
                )
                if "phone number" in object.lower():
                    JOBS[job_id] = object
        except LookupError as le:
            # perhaps our key is not formatted as we expected. If so skip it.
            current_app.logger.error(f"LookupError {le} #notify-admin-1200")

    current_app.logger.info(
        f"JOBS cache length after regen: {len(JOBS)} #notify-admin-1200"
    )


def get_s3_file(bucket_name, file_location, access_key, secret_key, region):
    s3_file = get_s3_object(bucket_name, file_location, access_key, secret_key, region)
    return s3_file.get()["Body"].read().decode("utf-8")
18 changes: 17 additions & 1 deletion app/celery/provider_tasks.py
@@ -2,6 +2,7 @@
import os
from datetime import timedelta

from botocore.exceptions import ClientError
from flask import current_app
from sqlalchemy.orm.exc import NoResultFound

@@ -22,7 +23,7 @@

# This is the amount of time to wait after sending an sms message before we check the aws logs and look for delivery
# receipts
-DELIVERY_RECEIPT_DELAY_IN_SECONDS = 120
+DELIVERY_RECEIPT_DELAY_IN_SECONDS = 30


@notify_celery.task(
@@ -62,6 +63,21 @@ def check_sms_delivery_receipt(self, message_id, notification_id, sent_at):
            provider_response=provider_response,
        )
        raise self.retry(exc=ntfe)
    except ClientError as err:
        # Probably a ThrottlingException but could be something else
        error_code = err.response["Error"]["Code"]
        provider_response = (
            f"{error_code} while checking sms receipt -- still looking"
        )
        status = "pending"
        carrier = ""
        update_notification_status_by_id(
            notification_id,
            status,
            carrier=carrier,
            provider_response=provider_response,
        )
        raise self.retry(exc=err)

    if status == "success":
        status = NotificationStatus.DELIVERED
5 changes: 5 additions & 0 deletions app/celery/tasks.py
@@ -441,6 +441,11 @@ def send_inbound_sms_to_service(self, inbound_sms_id, service_id):
)


@notify_celery.task(name="regenerate-job-cache")
def regenerate_job_cache():
    s3.get_s3_files()
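
For reference, a minimal sketch of triggering the new cache-regeneration task by hand, e.g. from `flask shell`. It assumes a worker is consuming the periodic queue and that the queue's name is the literal string "periodic" (the config below uses `QueueNames.PERIODIC`); check the deployed settings before relying on either.

```
from app.celery.tasks import regenerate_job_cache

# Enqueue the task on the periodic queue, as the beat schedule in config.py does.
regenerate_job_cache.apply_async(queue="periodic")
```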


@notify_celery.task(name="process-incomplete-jobs")
def process_incomplete_jobs(job_ids):
    jobs = [dao_get_job_by_id(job_id) for job_id in job_ids]
5 changes: 5 additions & 0 deletions app/config.py
@@ -249,6 +249,11 @@ class Config(object):
"schedule": crontab(hour=6, minute=0),
"options": {"queue": QueueNames.PERIODIC},
},
"regenerate-job-cache": {
"task": "regenerate-job-cache",
"schedule": crontab(minute="*/30"),
"options": {"queue": QueueNames.PERIODIC},
},
"cleanup-unfinished-jobs": {
"task": "cleanup-unfinished-jobs",
"schedule": crontab(hour=4, minute=5),
2 changes: 2 additions & 0 deletions app/service/rest.py
@@ -201,6 +201,8 @@ def get_service_by_id(service_id):
    fetched = dao_fetch_service_by_id(service_id)

    data = service_schema.dump(fetched)

    current_app.logger.info(f'>> SERVICE: {data["id"]}; {data}')
    return jsonify(data=data)


4 changes: 2 additions & 2 deletions deploy-config/production.yml
@@ -1,8 +1,8 @@
env: production
web_instances: 2
-web_memory: 2G
+web_memory: 4G
worker_instances: 1
-worker_memory: 2G
+worker_memory: 4G
scheduler_memory: 256M
public_api_route: notify-api.app.cloud.gov
admin_base_url: https://beta.notify.gov
49 changes: 41 additions & 8 deletions docs/all.md
@@ -60,9 +60,13 @@
- [Data Storage Policies \& Procedures](#data-storage-policies--procedures)
- [Potential PII Locations](#potential-pii-locations)
- [Data Retention Policy](#data-retention-policy)
-- [Debug messages not being sent](#debug-messages-not-being-sent)
-- [Getting the file location and tracing what happens](#getting-the-file-location-and-tracing-what-happens)
-- [Viewing the csv file](#viewing-the-csv-file)
+- [Troubleshooting](#troubleshooting)
+  - [Debug messages not being sent](#debug-messages-not-being-sent)
+  - [Getting the file location and tracing what happens](#getting-the-file-location-and-tracing-what-happens)
+  - [Viewing the csv file](#viewing-the-csv-file)
+  - [Deployment / app push problems](#deployment--app-push-problems)
+    - [Routes cannot be mapped to destinations in different spaces](#routes-cannot-be-mapped-to-destinations-in-different-spaces)
+    - [API request failed](#api-request-failed)


# Infrastructure overview
@@ -449,7 +453,10 @@ If this is the first time you have used Terraform in this repository, you will f
```
cf push --vars-file deploy-config/sandbox.yml --var NEW_RELIC_LICENSE_KEY=$NEW_RELIC_LICENSE_KEY
```
The real `push` command has more var arguments than the single one above. Get their values from a Notify team member.
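
For illustration, a fuller invocation has the same shape with more `--var` flags; `SOME_OTHER_SECRET` below is a hypothetical placeholder, not one of the real variable names:

```
cf push --vars-file deploy-config/sandbox.yml \
  --var NEW_RELIC_LICENSE_KEY=$NEW_RELIC_LICENSE_KEY \
  --var SOME_OTHER_SECRET=$SOME_OTHER_SECRET
```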
1. Visit the URL of the app you just deployed
* Admin https://notify-sandbox.app.cloud.gov/
* API https://notify-api-sandbox.app.cloud.gov/
# Database management
@@ -1327,11 +1334,12 @@ Seven (7) days by default. Each service can be set with a custom policy via `Ser

Data cleanup is controlled by several tasks in the `nightly_tasks.py` file, kicked off by Celery Beat.

+# Troubleshooting

-# Debug messages not being sent
+## Debug messages not being sent


-## Getting the file location and tracing what happens
+### Getting the file location and tracing what happens


Ask the user to provide the csv file name: either the csv file they uploaded, or the one that is autogenerated when they do a one-off send and is visible in the UI.
@@ -1340,7 +1348,7 @@ Starting with the admin logs, search for this file name. When you find it, the

In the api logs, search by job_id. Either you will see evidence of the job failing and retrying over and over (in which case search for a stack trace using the timestamp), or you will ultimately get to a log line that links the job_id to a message_id. In that case, switch to searching by message_id. You should be able to find the actual result from AWS, either success or failure, hopefully with some helpful info.

-## Viewing the csv file
+### Viewing the csv file

If you need to view the questionable csv file on production, run the following command:

@@ -1355,11 +1363,36 @@ locally, just do:

```
poetry run flask command download-csv-file-by-name -f <file location in admin logs>
```

-## Debug steps
+### Debug steps

1. Either send a message and capture the csv file name, or get a csv file name from a user
2. Using the log tool at logs.fr.cloud.gov, use filters to limit what you're searching on (cf.app is 'notify-admin-production', for example) and then search with the csv file name in double quotes over the relevant time period (the last 5 minutes if you just sent a message, or else whatever time the user sent at); an example query is sketched after this list
3. When you find the log line, you should also find the job_id and the s3 file location. Save these somewhere.
4. To get the csv file contents, you can run the command above. This command currently prints to the notify-api log, so after you run the command,
you need to search in notify-api-production for the last 5 minutes with the logs sorted by timestamp. The contents of the csv file unfortunately appear on separate lines so it's very important to sort by time.
5. If you want to see where the message actually failed, search with cf.app is notify-api-production using the job_id that you saved in step #3. If you get far enough, you might see one of the log lines has a message_id. If you see it, you can switch and search on that, which should tell you what happened in AWS (success or failure).
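
For illustration, the search in step 2 might look like this in the query bar at logs.fr.cloud.gov; the csv file name is a hypothetical placeholder, and the exact field syntax should be confirmed against the log tool:

```
cf.app: "notify-admin-production" AND "batch-reminders.csv"
```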

## Deployment / app push problems

### Routes cannot be mapped to destinations in different spaces

During `cf push` you may see

```
For application 'notify-api-sandbox': Routes cannot be mapped to destinations in different spaces
```

:ghost: This indicates a ghost route squatting on a route you need to create. In the cloud.gov web interface, check for incomplete deployments. They might be holding on to a route. Delete them. Also, check the list of routes (from the CloudFoundry icon in the left sidebar) for routes without an associated app. If they look like a route your app would need to create, delete them.
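
As a hedged sketch (the domain and hostname below are placeholders for whatever the orphaned route shows), the standard cf CLI commands for inspecting and removing routes are:

```
cf routes                                            # list routes in the space; look for entries with no bound app
cf delete-route app.cloud.gov --hostname notify-api-sandbox
```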

### API request failed

After pushing the Admin app, you might see this in the logs

```
{"name": "app", "levelname": "ERROR", "message": "API unknown failed with status 503 message Request failed", "pathname": "/home/vcap/app/app/__init__.py", ...
```

This indicates that the Admin and API apps are unable to talk to each other because of either a missing route or a missing network policy. The apps require [container-to-container networking](https://cloud.gov/docs/management/container-to-container/) to communicate. List `cf network-policies` and compare the output to our other deployed envs. If you find a policy is missing, you might have to create a network policy with something like:
```
cf add-network-policy notify-admin-sandbox notify-api-sandbox --protocol tcp --port 61443
```
44 changes: 22 additions & 22 deletions poetry.lock

Some generated files are not rendered by default.
