A new conversation starts! Human: hi History: {'dialog': [{'text': 'hi', 'speaker': 'usr'}]} Inputs after convert_data_to_inputs(): [] No inputs generated from history. #39

SHIVAM3052 opened this issue Nov 30, 2024 · 0 comments
Hi

Running Interact.py through interact_strat.sh, I am getting an issue:

# coding=utf-8

import json
import datetime
import torch
from torch import Tensor
import numpy as np
import os
import logging
import argparse
import random

from transformers.trainer_utils import set_seed
from utils.building_utils import boolean_string, build_model, deploy_model
from inputters import inputters
from inputters.inputter_utils import _norm

def cut_seq_to_eos(sentence, eos, remove_id=None):
    if remove_id is None:
        remove_id = [-1]
    sent = []
    for s in sentence:
        if s in remove_id:
            continue
        if s != eos:
            sent.append(s)
        else:
            break
    return sent
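
# For intuition: with made-up token ids where 2 plays the role of eos,
# cut_seq_to_eos([5, -1, 7, 2, 9], 2) returns [5, 7]
# (the default remove_id=[-1] drops -1, and the sequence stops before eos).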

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
parser.add_argument('--config_name', type=str, required=True)
parser.add_argument('--inputter_name', type=str, required=True)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--load_checkpoint", '-c', type=str, default=None)
parser.add_argument("--fp16", type=boolean_string, default=False)

parser.add_argument("--single_turn", action='store_true')
parser.add_argument("--max_input_length", type=int, default=256)
parser.add_argument("--max_src_turn", type=int, default=20)
parser.add_argument("--max_decoder_input_length", type=int, default=64)
parser.add_argument("--max_knl_len", type=int, default=64)
parser.add_argument('--label_num', type=int, default=None)

parser.add_argument("--min_length", type=int, default=5)
parser.add_argument("--max_length", type=int, default=64)

parser.add_argument("--temperature", type=float, default=1)
parser.add_argument("--top_k", type=int, default=1)
parser.add_argument("--top_p", type=float, default=1)
parser.add_argument('--num_beams', type=int, default=1)
parser.add_argument("--repetition_penalty", type=float, default=1.0)
parser.add_argument("--no_repeat_ngram_size", type=int, default=0)

parser.add_argument("--use_gpu", action='store_true')

args = parser.parse_args()

device = torch.device("cuda" if torch.cuda.is_available() and args.use_gpu else "cpu")
n_gpu = torch.cuda.device_count()
args.device, args.n_gpu = device, n_gpu

if args.load_checkpoint is not None:
    output_dir = args.load_checkpoint + '_interact_dialogs'
else:
    os.makedirs('./DEMO', exist_ok=True)
    output_dir = './DEMO/' + args.config_name
    if args.single_turn:
        output_dir = output_dir + '_1turn'
os.makedirs(output_dir, exist_ok=True)

#set_seed(args.seed)

strategy_list = [
    "Question",
    "Restatement or Paraphrasing",
    "Reflection of feelings",
    "Self-disclosure",
    "Affirmation and Reassurance",
    "Providing Suggestions",
    "Information",
    "Others"
]
# The loop below looks up id2strategy, which the pasted script never defines;
# presumably it maps predicted strategy ids back to the names above:
id2strategy = {i: s for i, s in enumerate(strategy_list)}

names = {
    'inputter_name': args.inputter_name,
    'config_name': args.config_name,
}

toker, model, *_ = build_model(checkpoint=args.load_checkpoint, **names)
model = deploy_model(model, args)

model.eval()

inputter = inputters[args.inputter_name]()
dataloader_kwargs = {
    'max_src_turn': args.max_src_turn,
    'max_input_length': args.max_input_length,
    'max_decoder_input_length': args.max_decoder_input_length,
    'max_knl_len': args.max_knl_len,
    'label_num': args.label_num,
}

pad = toker.pad_token_id
if pad is None:
    pad = toker.eos_token_id
assert pad is not None, 'either pad_token_id or eos_token_id should be provided'
bos = toker.bos_token_id
if bos is None:
    bos = toker.cls_token_id
assert bos is not None, 'either bos_token_id or cls_token_id should be provided'
eos = toker.eos_token_id
if eos is None:
    eos = toker.sep_token_id
assert eos is not None, 'either eos_token_id or sep_token_id should be provided'

generation_kwargs = {
    'max_length': args.max_length,
    'min_length': args.min_length,
    'do_sample': True if (args.top_k > 0 or args.top_p < 1) else False,
    'temperature': args.temperature,
    'top_k': args.top_k,
    'top_p': args.top_p,
    'num_beams': args.num_beams,
    'repetition_penalty': args.repetition_penalty,
    'no_repeat_ngram_size': args.no_repeat_ngram_size,
    'pad_token_id': pad,
    'bos_token_id': bos,
    'eos_token_id': eos,
}

eof_once = False
history = {'dialog': [],}
print('\n\nA new conversation starts!')

# Debug-enhanced version of the loop

while True:
    try:
        if args.single_turn and len(history['dialog']) > 0:
            raise EOFError
        raw_text = input("Human: ")
        while not raw_text:
            print('Prompt should not be empty!')
            raw_text = input("Human: ")
        eof_once = False
    except (EOFError, KeyboardInterrupt) as e:
        if eof_once:
            raise e
        eof_once = True
        save_name = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
        try:
            if len(history['dialog']) > 0:
                with open(os.path.join(output_dir, save_name + '.json'), 'w') as f:
                    json.dump(history, f, ensure_ascii=False, indent=2)
        except PermissionError as e:
            pass

        history = {'dialog': [],}
        print('\n\nA new conversation starts!')
        continue

    # Append human input to history
    history['dialog'].append({
        'text': _norm(raw_text),
        'speaker': 'usr',
    })

    # Debugging: Print current history
    print(f"History: {history}")  # Check structure and content of the history

    # Prepare inputs for the model
    inputs = inputter.convert_data_to_inputs(history, toker, **dataloader_kwargs)

    # Debugging: Log the output of `convert_data_to_inputs()`
    print(f"Inputs after convert_data_to_inputs(): {inputs}")

    if not inputs:
        print("No inputs generated from history.")
        continue  # Skip this iteration if no inputs are generated

    inputs = inputs[-1:]  # Get the latest input
    features = inputter.convert_inputs_to_features(inputs, toker, **dataloader_kwargs)

    # Debugging: Log the output of `convert_inputs_to_features()`
    print(f"Features after convert_inputs_to_features(): {features}")

    if not features:
        print("No features generated from inputs.")
        continue  # Skip this iteration if no features are generated

    batch = inputter.prepare_infer_batch(features, toker)
    batch = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in batch.items()}
    batch.update(generation_kwargs)

    # Debugging: Log the prepared batch
    logger.info(f"Prepared batch for generation: {batch}")

    # Generate response
    encoded_info, generations = model.generate(**batch)

    if generations is not None and len(generations) > 0:
        out = generations[0].tolist()
        out = cut_seq_to_eos(out, eos)
        text = toker.decode(out).encode('ascii', 'ignore').decode('ascii').strip()
        strat_id_out = encoded_info['pred_strat_id_top3'].tolist()[0][0]  # Get top1 strategy id
        strategy = id2strategy[strat_id_out]
        print("   AI: " + "[" + strategy + "]" + text)

        # Append AI response to history
        history['dialog'].append({
            'text': text,
            'speaker': 'sys',
            'strategy': strategy
        })
    else:
        logger.error("No generations were produced.")

Running it:

bash RUN/interact_strat.sh
/home/vision/miniconda3/envs/ESC/lib/python3.8/site-packages/transformers/modeling_utils.py:1283: FutureWarning: You are using torch.load with weights_only=False (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for weights_only will be flipped to True. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via torch.serialization.add_safe_globals. We recommend you start setting weights_only=True for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
state_dict = torch.load(resolved_archive_file, map_location="cpu")
11/30/2024 12:47:28 - INFO - utils.building_utils - loading finetuned model from ./DATA/strat.strat/2024-11-29080349.3e-05.16.1gpu/epoch-4.bin
/home/vision/Projects/BITS/utils/building_utils.py:56: FutureWarning: You are using torch.load with weights_only=False (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for weights_only will be flipped to True. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via torch.serialization.add_safe_globals. We recommend you start setting weights_only=True for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
model.load_state_dict(torch.load(checkpoint, map_location=torch.device('cpu')))
11/30/2024 12:47:28 - INFO - utils.building_utils - deploying model...

A new conversation starts!
Human: hi
History: {'dialog': [{'text': 'hi', 'speaker': 'usr'}]}
Inputs after convert_data_to_inputs(): []
No inputs generated from history.
Human:
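
If this script is based on the Emotional-Support-Conversation codebase, the empty list is expected here: the strat inputter's convert_data_to_inputs() builds one (context, response) input per dialog turn whose speaker is 'sys', so a history containing only the user's "hi" yields no inputs. The upstream interact.py handles this by appending a dummy system turn before conversion and popping it afterwards. A minimal sketch of that workaround, assuming the same inputter behavior (the 'n/a' text and 'Others' strategy are placeholder values):

    # Hypothetical workaround, assuming convert_data_to_inputs() only emits
    # an input for each 'sys' turn: append a placeholder system turn so the
    # latest user utterance becomes the context of one (context, response) pair.
    history['dialog'].append({
        'text': 'n/a',            # placeholder response text (assumption)
        'speaker': 'sys',
        'strategy': 'Others',     # any label from strategy_list works here
    })
    inputs = inputter.convert_data_to_inputs(history, toker, **dataloader_kwargs)
    history['dialog'].pop()       # remove the placeholder before generating

With that change, inputs should contain exactly one entry for the current turn, and the rest of the loop (convert_inputs_to_features, prepare_infer_batch, model.generate) proceeds as before.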
