#!/usr/bin/python3 # -*- coding: utf-8 -*- """ Software to screen for drug name similarity using a combination of orthographic and phonetic algorithms. Copyright (C) 2023 Colin Curtain and the Australian Commission on Safety and Quality in Health Care This file is part of LASA: LASA (Look-Alike Sound-Alike) Automated Screening Application. LASA is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. LASA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with LASA. If not, see . """ from array import array import csv import itertools from js import document import pyaline import pyodide_http # import requests import urllib3 import copy # Patch the Requests library so it works with Pyscript pyodide_http.patch_all() # Name item list positions ID = 0 # Trade Product ID or active ingredient ID NAME = 1 # Trade Product or Medicinal Substance Preferred Term or Tall Man name. PROCESSED_NAME = 2 # column in processed_source list # Default settings MODERATE_THRESHOLD = 0.65 # final score Moderate similarity threshold HIGH_THRESHOLD = 0.69 # final score High similarity threshold EXTREME_THRESHOLD = 0.9 # final score Extreme similarity threshold ORTHOGRAPHIC_PERCENT = 0.5 # weighting of orthographic component in final score. 1- this = Aline percent MINIMUM_NAME_LENGTH = 5 def get_tp_data(): """ FHIR json data. Dictionary: resourceType, url, name, status, experimental, copyright, expansion expansion Dictionary of contains, identifier, timestamp,total, offset, parameter """ snomed_version = "Unknown" tp_contains = [] try: headers = { 'Accept': 'application/json', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36' } # response = requests.get( # "https://r4.ontoserver.csiro.au/fhir/ValueSet/$expand?url=http%3A%2F%2Fsnomed.info%2Fsct%3Ffhir_vs%3Decl%2F(%255E%2520929360021000036102%2520)", headers=headers) response = urllib3.request('GET', 'https://r4.ontoserver.csiro.au/fhir/ValueSet/$expand?url=http%3A%2F%2Fsnomed.info%2Fsct%3Ffhir_vs%3Decl%2F(%255E%2520929360021000036102%2520)') tp_response = response.json() except Exception as e: return "Cannot access CSIRO Ontoserver11", tp_contains try: # snomed_url = tp_response['url'] tp_expansion = tp_response['expansion'] snomed_url = tp_expansion['parameter'][0]['valueUri'] tp_contains = tp_expansion['contains'] except KeyError: snomed_version = "Cannot access CSIRO Ontoserver" snomed_url = "" try: version_tmp = snomed_url.split("/version/")[1] snomed_date = version_tmp.split('?')[0] snomed_date = snomed_date[0:4] + "-" + snomed_date[4:6] + "-" + snomed_date[6:] snomed_version = "Snomed version: " + snomed_date + ". " except Exception as e_: snomed_version = "Cannot access CSIRO Ontoserver!!" return snomed_version, tp_contains def get_ingredient_data(): """ FHIR json data. Dictionary: resourceType, url, name, status, experimental, copyright, expansion expansion Dictionary of contains, identifier, timestamp,total, offset, parameter """ snomed_version = "Unknown" ing_contains = [] try: headers = { 'Accept': 'application/json', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', } # response = requests.get( # "https://r4.ontoserver.csiro.au/fhir/ValueSet/$expand?url=http%3A%2F%2Fsnomed.info%2Fsct%3Ffhir_vs%3Decl%2F(%3C30497011000036103%20.700000081000036101%20)") # response = requests.get( # "https://r4.ontoserver.csiro.au/fhir/ValueSet/$expand?url=http%3A%2F%2Fsnomed.info%2Fsct%3Ffhir_vs%3Decl%2F(%255E%2520929360021000036102%2520)", headers=headers) response = urllib3.request('GET', 'https://r4.ontoserver.csiro.au/fhir/ValueSet/$expand?url=http%3A%2F%2Fsnomed.info%2Fsct%3Ffhir_vs%3Decl%2F(%255E%2520929360021000036102%2520)') ing_response = response.json() except Exception as e: return "Cannot access CSIRO Ontoserver1", ing_contains try: # snomed_url = ing_response['url'] ing_expansion = ing_response['expansion'] snomed_url = ing_expansion['parameter'][0]['valueUri'] ing_contains = ing_expansion['contains'] except KeyError: snomed_version = "Cannot access CSIRO Ontoserver" snomed_url = "" try: version_tmp = snomed_url.split("/version/")[1] snomed_date = version_tmp.split('?')[0] snomed_date = snomed_date[0:4] + "-" + snomed_date[4:6] + "-" + snomed_date[6:] snomed_version = "Snomed version: " + snomed_date + ". " except Exception as e_: snomed_version = "Cannot access CSIRO Ontoserver" return snomed_version, ing_contains text_name_content = Element("text-name-content") text_name2_content = Element("text-name2-content") comparison_type_content = Element("comparison-selection") scores_content = Element("scores-selection") def run_comparison(*ags, **kws): if not text_name_content.element.value: return None source_name = text_name_content.element.value source_name = source_name.strip() second_name = text_name2_content.element.value second_name = second_name.strip() if len(source_name) < MINIMUM_NAME_LENGTH: return None comparison_type = comparison_type_content.value scores_type = scores_content.value LasaComparisons(source_name, comparison_type, scores_type, second_name) class LasaComparisons: """ Produces output files for each term in each letter of the alphabet, with a score of similarity between the term of interest and other terms with a sufficiently high score. All Trade Products and active ingredient terms starting with the selected letter(s) are listed, along with any matches and the level of the match. The process follows the algorithms used in the FDA Phonetic and Orthographic Computer Analysis (POCA) program https://www.fda.gov/Drugs/ResourcesForYou/Industry/ucm400127.htm Set up test server in folder: python3 -m http.server http://0.0.0.0:8000 or 127.0.0.1:8000 """ tall_man_names = [] # [id, original name, processed name] AKA Mixed case names tallman_processed_shortlist = [] ingredient_names = [] # [id, original name, processed name] ing_processed_shortlist = [] trade_product_names = [] # [id, original name, processed name] AKA Brand names tp_processed_shortlist = [] name2 = [] # [id, original name, processed name] name2_processed_shortlist = [] def __init__(self, source_name, comparison_selection, scores_selection, name2): """ Load required comparison data, convert names, display table. param: source_name: String comparison_selection: String scores_selection: String second_name: String """ self.duration = "" # For display of processing time seconds self.snomed_version = "" self.final_results = [] self.source_name = [0, source_name, self.process_name(source_name, True)] self.name2 = [[0, name2, self.process_name(name2, False)]] # Comparison type: mixed case names, ingredient names, brand names self.comparison_type = comparison_selection if scores_selection == "Moderate or higher": self.all_scores = False else: self.all_scores = True self.get_data() self.run_comparisons() self.fill_table() def get_data(self): """ Set up data list of names from tall man, and trade products or ingredients. Names list contains list of [id, original name, processed name]. SNOMED concept id is not used but stored for potential future use. Trade product and ingredients are requested from CSIRO Ontoserver. Create processed name shortlists. Shortlists speed comparisons as many Trade Products in particular can be reduced to one shortlisted processed name. """ # Fill tall_man data self.tall_man_names = [] self.tallman_processed_shortlist = [] try: with open("mixedcase.csv") as csvfile: reader = csv.reader(csvfile) for row in reader: self.tall_man_names.append(["", row[0], self.process_name(row[0])]) self.tallman_processed_shortlist.append(self.process_name(row[0])) except FileNotFoundError: # Use older coded list for tm in self.tall_man_base: self.tall_man_names.append(["", tm, self.process_name(tm)]) self.tallman_processed_shortlist.append(self.process_name(tm)) # Get trade product data if self.comparison_type == "Brand names": self.snomed_version, tp_contains = get_tp_data() self.trade_product_names = [] self.tp_processed_shortlist = [] for tp in tp_contains: self.trade_product_names.append([tp['code'], tp['display'], self.process_name(tp['display'])]) self.tp_processed_shortlist.append(self.process_name(tp['display'])) self.tp_processed_shortlist = list(set(self.tp_processed_shortlist)) # Get active ingredient data if self.comparison_type == "Ingredient names": self.snomed_version, ingredient_contains = get_ingredient_data() self.ingredient_names = [] self.ing_processed_shortlist = [] for ingredient in ingredient_contains: self.ingredient_names.append([ingredient['code'], ingredient['display'], self.process_name(ingredient['display'])]) self.ing_processed_shortlist.append(self.process_name(ingredient['display'])) self.ing_processed_shortlist = list(set(self.ing_processed_shortlist)) if self.comparison_type == "Second name": self.name2_processed_shortlist = [self.name2[0][2]] # The processed name in a list object def process_name(self, text, source=False): """ Processing steps: 1 Make lowercase and remove all bracketed terms, digits 2 Remove punctuation, except '-' initally. 3 Keep spaces between words. Replace slashes / with space. 4 Replace double spaces ' ' with one space ' ' 5 Remove single letter suffixes e.g. '-t' 6 Remove known unhelpful words 7 If spaces remain, split and keep only the first word. 8 If process name length differs from processed source length by 4+ characters return "*". Called by: get_data Parameters: text : String name to be converted source : True if source name. False - second list, but requires source name list item Returns: processed name : String """ text = text.lower() text = text.replace(' mg', '') new_text = "" # Remove bracketed text bracketed = False for i in range(0, len(text)): if text[i] == "(": bracketed = True if text[i] == ")": bracketed = False if not bracketed: new_text += text[i] # Remove non-alphabetical text and trim spaces, keep '-' temporarily processed_text = "" for i in range(0, len(new_text)): if new_text[i] == ' ' or new_text[i] == '/': processed_text += ' ' else: if new_text[i].isalpha() or new_text[i] == "-": processed_text += new_text[i] processed_text = processed_text.replace(' ', ' ') processed_text = processed_text.strip() word = processed_text if word != "": word = processed_text.split()[0] # Use only the first word for comparisons # Remove '-character' suffix for 1 letter suffixes # Gets rid of -c, -t if len(word) > 3 and word[-2] == '-': # e.g. aa-x to aa word = word[:-2] word = word.replace('-', '') # Remove known unhelpful names if word in ('activated', 'action', 'after', 'american', 'anthistamine', 'antiinflammatory', 'antifungal', 'antiseptic', 'bandage', 'chelsea', 'clean', 'clear', 'chesty', 'compound', 'congested', 'cream', 'dressing', 'ethyl', 'extra', 'first', 'hairscience', 'inert', 'liquid', 'methyl', 'monobasic', 'prepared', 'sleep', 'strong', 'water'): word = "" # Use first word only if len(word) > 0: word = word.split()[0] # Words meets minimum length if len(word) < MINIMUM_NAME_LENGTH: return "" if not source and abs(len(word) - len(self.source_name[PROCESSED_NAME])) > 3: return "*" return word def run_comparisons(self): """ Compare the source to list of target names, using the defined thresholds for orthographic/phonetic proportions. comparison type: Tall man names, Trade names, Ingredient names all_scores: True all or False Moderate or higher Comparisons are performed against the target shortlist of processed names to increase speed. e.g. June 2023: 12562 trade products is reduced to 5122 shortlist processed names. Score results are then back-filled to the original names lists using the processed name as a matching key. final_results List: [Score, written description (low, moderate, high, extreme), target processed name, target name] """ target_names = [] if self.comparison_type == "Mixed case names": target_names = copy.deepcopy(self.tall_man_names) if self.comparison_type == "Brand names": target_names = copy.deepcopy(self.trade_product_names) if self.comparison_type == "Ingredient names": target_names = copy.deepcopy(self.ingredient_names) if self.comparison_type == "Second name": target_names = copy.deepcopy(self.name2) # Determine which shortlist to use shortlist = self.tallman_processed_shortlist if self.comparison_type == "Ingredient names": shortlist = self.ing_processed_shortlist if self.comparison_type == "Brand names": shortlist = self.tp_processed_shortlist if self.comparison_type == "Second name": shortlist = self.name2_processed_shortlist # Compare each name and add result to list comparison_results_dict = self.compare_names(shortlist) # Link shortlist keys to target names to prepare data for table display result_list = [] for target in target_names: if len(target[PROCESSED_NAME]) >= MINIMUM_NAME_LENGTH: try: result_list.append( comparison_results_dict[target[PROCESSED_NAME]] + [target[PROCESSED_NAME], target[NAME]]) except KeyError: pass ''' Do not append these rows - names too short or name length difference 4+ characters elif target[PROCESSED_NAME] == "*": result_list.append([0,"len diff", "", target[NAME]]) else: result_list.append([0,"Too short", target[PROCESSED_NAME], target[NAME]]) ''' if len(result_list) == 0: result_list = [[0, "No Results", "", ""]] # Sort final results descending by score interim_results = sorted(result_list, key=lambda scores: scores[0], reverse=True) if self.all_scores: self.final_results = interim_results else: self.final_results = [x for x in interim_results if x[0] >= 0.65] # Display processing statistics msg = self.source_name[NAME] if self.source_name[NAME] != self.source_name[PROCESSED_NAME]: msg += f" as: {self.source_name[PROCESSED_NAME]}" msg += f" compared to {self.comparison_type}" if self.snomed_version != "": msg += f". {self.snomed_version}" processing_p = document.getElementById("statistics") processing_p.textContent = msg def compare_names(self, target_names_shortlist): """ Uses BISIM2, ALINE and LEVENSHTEIN. BISIM2 comes from the lingpy software, and is used as a normalised similarity score. Uses (ALINE and BISIM2) and LEVENSHTEIN, with weighting to orthographic and phonetic components. Use 1-LED score to get a high number for closeness rather than distance - so higher scores are more similar. Use 1- BISIM2 score to get a high number for closeness. The pyaline algorithm is the slow step in the comparisons. Have done all that is reasonably possible to speed this step. parameters: target_names_shortlist : List of Strings returns: result_dict : Dictionary of key = processed drug name, value = List of [score and similarity wording] """ result_dict = {} source_aline_string = pyaline.AlineString(self.source_name[PROCESSED_NAME]) for i, target in enumerate(target_names_shortlist): if len(target) >= MINIMUM_NAME_LENGTH: result_bisim2 = 1 - self.bisim2(self.source_name[PROCESSED_NAME], target, normalized=True) lshtein = self.nlevenshtein(self.source_name[PROCESSED_NAME], target) lshtein = 1 - lshtein # For minimum distance target_aline_string = pyaline.AlineString(target) a = pyaline.Aline(source_aline_string, target_aline_string) aline = a.get_normalized_similarity() final_score = ORTHOGRAPHIC_PERCENT * (lshtein + result_bisim2) / 2 + (1 - ORTHOGRAPHIC_PERCENT) * aline similarity_wording = 'low' if final_score >= MODERATE_THRESHOLD: similarity_wording = "moderate" if final_score > HIGH_THRESHOLD: similarity_wording = "high" if final_score > EXTREME_THRESHOLD: similarity_wording = "extreme" short_res = [round(final_score, 4), similarity_wording] else: short_res = [0, "Too short"] result_dict[target] = short_res return result_dict @staticmethod def bisim2(a, b, normalized=True): """ Computes bigram similarity "the comprehensive version". Notes -- Computes the number of common 1-grams between two n-grams. From LingPy compare.strings https://github.com/lingpy/lingpy. """ pad_symbol = "-" n = 2 m = [] la = len(a) + 1 lb = len(b) + 1 s_a = itertools.chain((pad_symbol,) * (n - 1), a) s_a = itertools.chain(s_a, (pad_symbol,) * (n - 1)) s_a = list(s_a) count = max(0, len(s_a) - n + 1) s_a = [tuple(s_a[i:i + n]) for i in range(count)] s_b = itertools.chain((pad_symbol,) * (n - 1), b) s_b = itertools.chain(s_b, (pad_symbol,) * (n - 1)) s_b = list(s_b) count = max(0, len(s_b) - n + 1) s_b = [tuple(s_b[i:i + n]) for i in range(count)] for i in range(0, la): m.append([]) for j in range(0, lb): m[i].append(0) m[i][0] = 0 for i in range(0, lb): m[0][i] = 0 for i in range(1, la): for j in range(1, lb): sim = len([k for k in s_a[i - 1] if k in s_b[j - 1]]) / 2.0 m[i][j] = max(m[i][j - 1], m[i - 1][j], m[i - 1][j - 1] + sim) la = la - 1 lb = lb - 1 if not normalized: return float(max(la, lb)) - float(m[la][lb]) return 1.0 - (float(m[la][lb]) / float(max(la, lb))) @staticmethod def nlevenshtein(seq1, seq2): # In original code, not needed: , method=2): """ Compute the normalized Levenshtein distance between `seq1` and `seq2`. Two normalization methods are provided. For both of them, the normalized distance will be a float between 0 and 1, where 0 means equal and 1 completely different. The computation obeys the following patterns: 0.0 if seq1 == seq2 1.0 if len(seq1) == 0 or len(seq2) == 0 edit distance / factor otherwise The `method` parameter specifies which normalization factor should be used. It can have the value 1 or 2, which correspond to the following: 1: the length of the shortest alignment between the sequences (that is, the length of the longest sequence) 2: the length of the longest alignment between the sequences. LASA uses this method. Which normalization factor should be chosen is a matter of taste. The first one is cheap to compute. The second one is more costly, but it accounts better than the first one for parallelisms of symbols between the sequences. For the rationale behind the use of the second method, see: Heeringa, "Measuring Dialect Pronunciation Differences using Levenshtein Distance", 2004, p. 130 sq, which is available online at: http://www.let.rug.nl/~heeringa/dialectology/thesis/thesis.pdf The nlevenshtein method from: Python Distance 0.1.3 package https://github.com/doukremt/distance https://pypi.python.org/pypi/Distance/ Author: Michael Meyer GNP GPL License """ if seq1 == seq2: return 0.0 len1, len2 = len(seq1), len(seq2) if len1 == 0 or len2 == 0: return 1.0 if len1 < len2: # minimize the arrays size len1, len2 = len2, len1 seq1, seq2 = seq2, seq1 '''if method == 1: return levenshtein(seq1, seq2) / float(len1) if method != 2: raise ValueError("expected either 1 or 2 for `method` parameter")''' column = array('L', range(len2 + 1)) length = array('L', range(len2 + 1)) y = 0 for x in range(1, len1 + 1): column[0] = length[0] = x last = llast = x - 1 for y in range(1, len2 + 1): # Distance old = column[y] ic = column[y - 1] + 1 dc = column[y] + 1 rc = last + (seq1[x - 1] != seq2[y - 1]) column[y] = min(ic, dc, rc) last = old # Length lold = length[y] lic = length[y - 1] + 1 if ic == column[y] else 0 ldc = length[y] + 1 if dc == column[y] else 0 lrc = llast + 1 if rc == column[y] else 0 length[y] = max(ldc, lic, lrc) llast = lold return column[y] / float(length[y]) def fill_table(self): """ Clear and fill html table, located in output div. """ running_msg_span = document.getElementById("running_msg") running_msg_span.textContent = "" output_div = document.getElementById("output") while output_div.firstChild: output_div.removeChild(output_div.firstChild) if len(self.final_results) == 0: p = document.createElement("p") p.textContent = "No results" p.style.fontSize = "16px" p.style.color = "red" output_div.append(p) return table = document.createElement("table") table.id = "results-table" table.style.borderCollapse = "collapse" table.classList.add("table") # header_row = ["Score", "Similarity", "Processed", self.comparison_type] # OLD header_row = ["Score", "Similarity", self.comparison_type] th_row = document.createElement("tr") for col in header_row: th_element = document.createElement("th") th_element.style.border = "1px gray solid" th_element.style.fontSize = "14px" th_element.textContent = col th_row.append(th_element) table.append(th_row) for row in self.final_results: table_row = document.createElement("tr") # table_row.classList.add(row[1]) for i, col in enumerate(row): if i != 2: # Do not show Processed name column td_element = document.createElement("td") if i != 3: td_element.classList.add(row[1]) td_element.style.border = "1px gray solid" td_element.style.padding = "0px 1px 0px 3px" # Top R btm L td_element.style.fontSize = "14px" td_element.textContent = col table_row.append(td_element) table.append(table_row) output_div.append(table) # document.body.appendChild(output_div) # ACSQHC 2020 tallman list and supplementary list tall_man_base = ["aCLin", "actoNEL", "actoS", "aFATinib", "aKAMin", "alDACTONE", "alDOMET", "ALKeran", "alODORM", "amARYl", "amiNOPHYLLine", "amiODAROne", "amiTRIPTYLine", "amLODIPine", "amOXil", "aPomine", "arABLOC", "arATAC", "ARIPiprazole", "arOPAX", "aTRopt", "avaSTIN", "avaXIM", "aVomine", "aXITinib", "azATHIOPRINE", "aziTHROMYCIN", "aZopt", "bARICITinib", "beNRALizumab", "beVACizumab", "beZLOTOXumab", "biNIMEtinib", "bisACODYl", "bisOPROLOl", "buDESONide", "buMETANide", "cABOZANtinib", "caLTRate", "caPTOPRil", "caRAFate", "CARBAMazepine", "carbiMAZOLe", "cARBOplatin", "caRVEDILOl", "cefaLEXin", "cefALOTIN", "cefaZOLin", "cefEPIME", "cefOTAXIME", "cefOXITIN", "ceftAROLine", "cefTAZIDIME", "cefTRIAXONE", "celAPRAM", "celEBREX", "cephaLEXin", "cephaZOLin", "cHLORPROMAZine", "ciclosPORIN", "ciprAMIL", "ciPROFLOXAcin", "ciprOXIN", "ciSplatin", "cLARITHROMYcin", "cloBAZam", "cLOMIFEne", "cLOMIPRAMine", "CLONazepam", "cOBIMEtinib", "coUMADIN", "coVERSYL", "CYCLOPHOSPHamide", "cyclosERINE", "cyclosPORIN", "daBRAFEnib", "daCTINomycin", "daPTomycin", "daSATinib", "DAUNOrubicin", "depoPROVERA", "DEPOmedrol", "dePTRAn", "deRALin", "Diazepam", "dilaNTIN", "dilaUDID", "diPRIVan", "diPYRIDAMOLe", "diSOPYRAMIDe", "diTROPan", "DOCEtaxel", "doSULepin", "doTHiepin", "doXepin", "DOXOrubicin", "DULoxetine", "eCULizumab", "eFALizumab", "eMICizumab", "ERYthromycin", "fluoxetine", "fluVOXAMine", "gliBENCLAMide", "gliCLAZide", "gliMEPIRide", "gliPIZide", "humALOG", "humULIN", "hydrALAZINe", "hydreA", "hydreNE", "hydrOCHLOROTHIAZIDe", "HYDROmorphone", "iDArubicin", "iFOSFamide", "INDEral", "isoptoCARpine", "isoptoHOMATROpine", "iSOtretinoin", "januMET", "januVIA", "ketALAR", "ketOROLAC", "laMICTAl", "laMISil", "lamiVUDine", "lamOTRIGine", "lanTUs", "lanVis", "laPAtinib", "laRGACTil", "leNVAtinib", "LEUKeran", "linCOMYCIN", "linEZOLID", "LORazepam", "loSEC", "loVAn", "mercaptAMine", "mercaptOPURine", "methADONe", "methYLPHENIDATe", "moBILis", "moVALis", "MOXifloxacin", "MSContin", "MYLeran", "NEOral", "nexAVAR", "nexiUM", "niFEDIPine", "niMODIPine", "niZATIDine", "NORfloxacin", "norMISON", "norVASC", "novoMIX", "novoRAPID", "oBINUTUZumab", "oCRELizumab", "oFATUMumab", "oMALizumab", "Oxazepam", "OXCARBazepine", "oxyCONTIN", "oxyNORM", "PACLitaxel", "pANITUMumab", "paRIET", "PARoxetine", "paXTINE", "pAZOPanib", "pERTUZumab", "pEXSIG", "pONATinib", "primaCIN", "primaCOR", "primaXIN", "pRISTIQ", "proCHLORPERazine", "proGRAF", "proMETHazine", "propOFol", "propRANOLol", "proZAC", "QUETIAPine", "RABEprazole", "raMUCIRumab", "raNIBIZumab", "rifaMPICin", "rifaXIMin", "riSPERIDONe", "rOPINIROLe", "sAXagliptin", "SERTRALine", "Sirolimus", "siTagliptin", "SOLUmedrol", "soluCORTEF", "soNIDEGib", "soRAFENib", "sulfaDiazine", "sulfaSALazine", "sUMATRIPTAn", "sUNITinib", "TACrolimus", "tAPENTadol", "tEGRETOl", "tEMOdal", "tOFACitinib", "toFRANIL", "toPAMAX", "tORadol", "tRAMadol", "tRAMEtinib", "tRENTAl", "trimEPRAZINE", "trimETHOPRIM", "valAciclovir", "valGANciclovir", "vinBLASTine", "vinCRISTine", "vinORELBin", "xalaCOM", "xalaTAN", "zinNAt", "zinVit", "zoCOR", "zoLOFT", "zoTON"]