I am Dockerizing a Flask application, and everything works until the container is created, at which point it fails with the error "Name or service not known".
Dockerfile:
FROM python:3.10.8
COPY requirements.txt .
RUN pip install -r requirements.txt
RUN python -c "import nltk; nltk.download('averaged_perceptron_tagger'); nltk.download('wordnet'); nltk.download('omw-1.4'); nltk.download('stopwords');"
COPY . .
EXPOSE 5000
CMD ["flask", "run", "--host= 0.0.0.0", "--port=5000"]
docker-compose.yml:
version: "3.7"
services:
  mlapp:
    container_name: Container
    image: mlapp
    ports:
      - "5000:5000"
    build:
      context: .
      dockerfile: Dockerfile
When I run a local server for the app, it responds correctly and works perfectly, so I don't understand what is causing the problem.
app.py:
from flask import Flask, jsonify, request
from util import prediction

app = Flask(__name__)

@app.post('/predict')
def predict():
    data = request.json
    try:
        sample = data['text']
    except KeyError:
        return jsonify({'error': 'No text sent'})
    # sample = [sample]
    pred = prediction(sample)
    try:
        result = jsonify(pred)
    except TypeError as e:
        result = jsonify({'error': str(e)})
    return result

if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True)
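Once the container is up, the endpoint can be exercised with a small client script. A minimal sketch, assuming the service is published on localhost:5000 as in the compose file and that the requests package is installed:

import requests

# Hypothetical smoke test for the /predict endpoint; assumes the container
# publishes port 5000 on localhost as in docker-compose.yml above.
resp = requests.post('http://localhost:5000/predict',
                     json={'text': 'may the force be with you'})
print(resp.json())  # expected shape: {'Label': '<emotion>'} or {'error': ...}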
util.py:
import csv
import re
import string
import time
import warnings

import joblib
import nltk
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tag import pos_tag
from nltk.tokenize import TweetTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.model_selection import train_test_split

warnings.filterwarnings("ignore")

# nltk.download('averaged_perceptron_tagger')
# nltk.download('wordnet')
# nltk.download('omw-1.4')
# nltk.download('stopwords')
token = TweetTokenizer()

def lemmatize_sentence(tokens):
    lemmatizer = WordNetLemmatizer()
    lemmatized = []
    for word, tag in pos_tag(tokens):
        if tag.startswith('NN'):
            pos = 'n'
        elif tag.startswith('VB'):
            pos = 'v'
        else:
            pos = 'a'
        lemmatized.append(lemmatizer.lemmatize(word, pos))
    return lemmatized
# print(' '.join(lemmatize_sentence(data[0][0])))
# Data cleaning, getting rid of words not needed for analysis.
stop_words = stopwords.words('english')
# Expand common chat abbreviations; HTML entity fragments map to empty strings.
ABBREVIATIONS = {
    'u': 'you', 'r': 'are', 'some1': 'someone',
    'yrs': 'years', 'hrs': 'hours', 'mins': 'minutes', 'secs': 'seconds',
    'pls': 'please', 'plz': 'please',
    '2morow': 'tomorrow', '2day': 'today',
    '4got': 'forget', '4gotten': 'forget',
    'amp': '', 'quot': '', 'lt': '', 'gt': '',
}

def cleaned(token):
    return ABBREVIATIONS.get(token, token)
# Noise removal: strip links and mentions, lemmatize, then drop short tokens,
# punctuation and stopwords.
def remove_noise(tokens):
    cleaned_tokens = []
    lemmatizer = WordNetLemmatizer()
    for token, tag in pos_tag(tokens):
        # Using a non-capturing group (?:...) and eliminating the token if it is a link.
        token = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\(\),]|(?:%[0-9a-fA-F]))+', '', token)
        token = re.sub('[^a-zA-Z]', ' ', token)
        # Eliminating the token if it is a mention.
        token = re.sub(r"(@[A-Za-z0-9_]+)", "", token)
        if tag.startswith("NN"):
            pos = 'n'
        elif tag.startswith("VB"):
            pos = 'v'
        else:
            pos = 'a'
        token = lemmatizer.lemmatize(token, pos)
        cleaned_token = cleaned(token.lower())
        # Keep the token only if it is longer than 2 characters, not punctuation
        # and not a stopword.
        if cleaned_token not in string.punctuation and len(cleaned_token) > 2 and cleaned_token not in stop_words:
            cleaned_tokens.append(cleaned_token)
    return cleaned_tokens
with open('Models/Sentimenttfpipe', 'rb') as f:
    loaded_pipeline = joblib.load(f)
LABELS = {0: 'Relaxed', 1: 'Angry', 2: 'Fearful', 3: 'Happy', 4: 'Sad', 5: 'Surprised'}

def prediction(body):
    # loaded_pipeline = joblib.load('Api/Models/Sentimenttfpipe')
    tokens = remove_noise(token.tokenize(body))
    test = pd.DataFrame([" ".join(tokens)], columns=['text'])
    a = loaded_pipeline.predict(test['text'].values.astype('U'))
    if a[0] in LABELS:
        return {'Label': LABELS[a[0]]}
if __name__ == '__main__':
    sen = "may the force be with you"
    a = prediction(sen)
    print(a)
I have done a fair amount of googling without finding a solution, and I tried changing small bits of the code that I thought might affect the result, but it didn't help.
The command I run is "docker compose up --build", and it gives the error "Name or service not known" when attaching to the container.
1 Answer
Found the answer with the help of @David Maze.
There is a space after "host=", which was causing the problem. Removing the space makes it run.
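For completeness, the fix amounts to removing the space inside the --host argument of the Dockerfile's CMD line, so it becomes:

CMD ["flask", "run", "--host=0.0.0.0", "--port=5000"]

With the stray space, the argument value is " 0.0.0.0" (leading space included), so Flask attempts a hostname lookup for that literal string instead of binding to the address, and the failed lookup is what surfaces as "Name or service not known".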