empresa-libre/source/app/controllers/util.py

#!/usr/bin/env python3
# ~ Empresa Libre
# ~ Copyright (C) 2016-2018 Mauricio Baeza Servin (web@correolibre.net)
# ~
# ~ This program is free software: you can redistribute it and/or modify
# ~ it under the terms of the GNU General Public License as published by
# ~ the Free Software Foundation, either version 3 of the License, or
# ~ (at your option) any later version.
# ~
# ~ This program is distributed in the hope that it will be useful,
# ~ but WITHOUT ANY WARRANTY; without even the implied warranty of
# ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# ~ GNU General Public License for more details.
# ~
# ~ You should have received a copy of the GNU General Public License
# ~ along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import datetime
import getpass
import hashlib
import io
import json
import locale
import mimetypes
import os
import re
import requests
import sqlite3
import socket
import subprocess
import tempfile
import textwrap
import threading
import time
import unicodedata
import uuid
import zipfile
from io import BytesIO
from math import trunc
from pathlib import Path
from xml.etree import ElementTree as ET
from xml.dom.minidom import parseString
try:
import uno
from com.sun.star.beans import PropertyValue
from com.sun.star.awt import Size
from com.sun.star.view.PaperFormat import LETTER
APP_LIBO = True
except ImportError:
APP_LIBO = False
# ~ import pyqrcode
from dateutil import parser
from lxml import etree
import mako.runtime
from mako.exceptions import TopLevelLookupException
mako.runtime.UNDEFINED = ''
from .helper import CaseInsensitiveDict, NumLet, SendMail, TemplateInvoice, \
SeaFileAPI, PrintTicket
from settings import DEBUG, MV, log, template_lookup, COMPANIES, DB_SAT, \
PATH_XSLT, PATH_XSLTPROC, PATH_OPENSSL, PATH_TEMPLATES, PATH_MEDIA, PRE, \
PATH_XMLSEC, TEMPLATE_CANCEL, DEFAULT_SAT_PRODUCTO, DECIMALES, DIR_FACTURAS
from settings import USAR_TOKEN, API, DECIMALES_TAX
# ~ from .configpac import AUTH
from .utils import get_qr
# ~ v2
import segno
from .pacs.cfdi_cert import SATCertificate
from settings import (
CFDI_VERSIONS,
EXT,
MXN,
PATHS,
PRE_DEFAULT,
)
def _call(args):
return subprocess.check_output(args, shell=True).decode()
def _get_md5(data):
return hashlib.md5(data.encode()).hexdigest()
def save_temp(data, modo='wb'):
path = tempfile.mkstemp()[1]
with open(path, modo) as f:
f.write(data)
return path
def save_file(path, data, modo='wb'):
try:
with open(path, modo) as f:
f.write(data)
return True
except:
return False
def _join(*paths):
return os.path.join(*paths)
def _kill(path):
try:
os.remove(path)
except:
pass
return
def get_pass():
password = getpass.getpass('Introduce la contraseña: ')
pass2 = getpass.getpass('Confirma la contraseña: ')
if password != pass2:
msg = 'Las contraseñas son diferentes'
return False, msg
password = password.strip()
if not password:
msg = 'La contraseña es necesaria'
return False, msg
return True, password
def get_value(arg):
value = input('Introduce el {}: '.format(arg)).strip()
if not value:
msg = 'El {} es requerido'.format(arg)
log.error(msg)
return ''
return value
def _valid_db_companies():
con = sqlite3.connect(COMPANIES)
sql = """
CREATE TABLE IF NOT EXISTS names(
rfc TEXT NOT NULL COLLATE NOCASE UNIQUE,
con TEXT NOT NULL
);
"""
cursor = con.cursor()
cursor.executescript(sql)
cursor.close()
con.close()
return
def _get_args(rfc):
_valid_db_companies()
    con = sqlite3.connect(COMPANIES)
    cursor = con.cursor()
    sql = "SELECT con FROM names WHERE rfc=?"
    cursor.execute(sql, (rfc,))
    values = cursor.fetchone()
    cursor.close()
    con.close()
    if values is None:
        msg = 'No se encontró el RFC'
        log.error(msg)
        return ''
    return values[0]
def get_rfcs():
_valid_db_companies()
con = sqlite3.connect(COMPANIES)
cursor = con.cursor()
sql = "SELECT * FROM names"
cursor.execute(sql)
values = cursor.fetchall()
cursor.close()
con.close()
return values
def get_con(rfc=''):
if not rfc:
rfc = get_value('RFC').upper()
if not rfc:
return {}
args = _get_args(rfc.upper())
if not args:
return {}
return loads(args)
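
# ~ Usage sketch (illustrative RFC): the COMPANIES database keeps one row per
# ~ RFC in the table names, with the connection settings serialized as JSON.
# ~     con = get_con('XAXX010101000')
# ~     # -> dict loaded from names.con for that RFC, or {} if it is not registered
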
def get_sat_key(table, key):
con = sqlite3.connect(DB_SAT)
cursor = con.cursor()
sql = 'SELECT key, name FROM {} WHERE key=?'.format(table)
cursor.execute(sql, (key,))
data = cursor.fetchone()
cursor.close()
con.close()
if data is None:
return {'ok': False, 'text': 'No se encontró la clave'}
return {'ok': True, 'text': data[1]}
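
# ~ Illustrative result shape: a lookup against a DB_SAT catalog table returns
# ~     {'ok': True, 'text': <name stored for the key>}
# ~ or  {'ok': False, 'text': 'No se encontró la clave'} when the key is missing.
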
def get_sat_monedas(key):
con = sqlite3.connect(DB_SAT)
con.row_factory = sqlite3.Row
cursor = con.cursor()
filtro = '%{}%'.format(key)
sql = "SELECT * FROM monedas WHERE key LIKE ? OR name LIKE ?"
cursor.execute(sql, [filtro, filtro])
data = cursor.fetchall()
cursor.close()
con.close()
if data is None:
return ()
data = [dict(r) for r in data]
return tuple(data)
def get_sat_unidades(key):
con = sqlite3.connect(DB_SAT)
con.row_factory = sqlite3.Row
cursor = con.cursor()
filtro = '%{}%'.format(key)
sql = "SELECT * FROM unidades WHERE key LIKE ? OR name LIKE ?"
cursor.execute(sql, [filtro, filtro])
data = cursor.fetchall()
cursor.close()
con.close()
if data is None:
return ()
data = [dict(r) for r in data]
return tuple(data)
def get_sat_unidadespeso(key):
con = sqlite3.connect(DB_SAT)
con.row_factory = sqlite3.Row
cursor = con.cursor()
filtro = '%{}%'.format(key)
sql = "SELECT * FROM unidad_peso WHERE key LIKE ? OR name LIKE ?"
cursor.execute(sql, [filtro, filtro])
data = cursor.fetchall()
cursor.close()
con.close()
if data is None:
return ()
data = tuple([dict(r) for r in data])
return data
def get_sat_productos(key):
con = sqlite3.connect(DB_SAT)
con.row_factory = sqlite3.Row
cursor = con.cursor()
filtro = '%{}%'.format(key)
sql = "SELECT * FROM productos WHERE key LIKE ? OR name LIKE ?"
cursor.execute(sql, [filtro, filtro])
data = cursor.fetchall()
cursor.close()
con.close()
if data is None:
return ()
data = [dict(r) for r in data]
return tuple(data)
def now():
n = datetime.datetime.now().replace(microsecond=0)
return n
def today():
return datetime.date.today()
def get_token():
return _get_hash(uuid.uuid4().hex)
def get_mimetype(path):
mt = mimetypes.guess_type(path)[0]
return mt or 'application/octet-stream'
def is_file(path):
return os.path.isfile(path)
def get_stream(path):
return get_file(path), get_size(path)
def get_file(path):
return open(path, 'rb')
def get_files(path, ext='xml'):
docs = []
for folder, _, files in os.walk(path):
        pattern = re.compile(r'\.{}'.format(ext), re.IGNORECASE)
docs += [os.path.join(folder, f) for f in files if pattern.search(f)]
return tuple(docs)
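
# ~ Example (illustrative path): get_files('/tmp/cfdi') walks the whole tree and
# ~ returns every file whose name contains '.xml' (case insensitive); pass
# ~ ext='pdf' to collect PDF files instead.
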
def read_file(path, mode='rb'):
return open(path, mode).read()
def get_size(path):
return os.path.getsize(path)
def get_template(name, data={}):
# ~ print ('NAME', name)
template = template_lookup.get_template(name)
return template.render(**data)
def get_custom_styles(name, default='plantilla_factura.json'):
path = _join(PATH_MEDIA, 'templates', name.lower())
if is_file(path):
with open(path) as fh:
return loads(fh.read())
path = _join(PATH_TEMPLATES, default)
if is_file(path):
with open(path) as fh:
return loads(fh.read())
return {}
def get_template_ods(name, default='plantilla_factura.ods'):
path = _join(PATH_MEDIA, 'templates', name.lower())
if is_file(path):
return path
if 'pagos' in name:
default='plantilla_pagos.ods'
path = _join(PATH_TEMPLATES, default)
if is_file(path):
return path
return ''
def dumps(data):
return json.dumps(data, default=str)
def loads(data):
return json.loads(data)
def import_json(path):
return loads(read_file(path, 'r'))
def clean(values):
for k, v in values.items():
if isinstance(v, str):
values[k] = v.strip()
return values
def parse_con(values):
data = values.split('|')
try:
con = {'type': data[0]}
if con['type'] == 'sqlite':
con['name'] = data[1]
else:
if data[1]:
con['host'] = data[1]
if data[2]:
con['port'] = data[2]
con['name'] = data[3]
con['user'] = data[4]
con['password'] = data[5]
return con
except IndexError:
return {}
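
# ~ Illustrative examples of the pipe separated string expected by parse_con
# ~ (all values hypothetical):
# ~     parse_con('sqlite|empresa.db')
# ~     # -> {'type': 'sqlite', 'name': 'empresa.db'}
# ~     parse_con('postgres|localhost|5432|empresa|usuario|secreto')
# ~     # -> {'type': 'postgres', 'host': 'localhost', 'port': '5432',
# ~     #     'name': 'empresa', 'user': 'usuario', 'password': 'secreto'}
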
def spaces(value):
return '\n'.join([' '.join(l.split()) for l in value.split('\n')])
def to_slug(string):
value = (unicodedata.normalize('NFKD', string)
.encode('ascii', 'ignore')
.decode('ascii').lower())
return value.replace(' ', '_')
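
# ~ Example: to_slug('Facturación Electrónica') -> 'facturacion_electronica'
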
def timbra_xml(xml, auth):
from .pac import Finkok as PAC
if not DEBUG and not auth:
msg = 'Sin datos para timbrar'
result = {'ok': False, 'error': msg}
return result
result = {'ok': True, 'error': ''}
pac = PAC(auth)
new_xml = pac.timbra_xml(xml)
if not new_xml:
result['ok'] = False
result['error'] = pac.error
if pac.error.startswith('413'):
return _ecodex_timbra_xml(xml)
else:
return result
result['xml'] = new_xml
result['uuid'] = pac.uuid
result['fecha'] = pac.fecha
return result
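
# ~ Result shape (summary of the function above): on success
# ~     {'ok': True, 'error': '', 'xml': <stamped XML>, 'uuid': ..., 'fecha': ...}
# ~ and on failure {'ok': False, 'error': <PAC message>}; the Ecodex fallback is
# ~ only tried when the Finkok error starts with '413'.
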
def _get_uuid_fecha(xml):
doc = parse_xml(xml)
version = doc.attrib['Version']
node = doc.find('{}Complemento/{}TimbreFiscalDigital'.format(
PRE[version], PRE['TIMBRE']))
return node.attrib['UUID'], node.attrib['FechaTimbrado']
# ~ def get_sat(xml):
# ~ from .pac import get_status_sat
# ~ return get_status_sat(xml)
class LIBO(object):
HOST = 'localhost'
PORT = '8100'
ARG = 'socket,host={},port={};urp;StarOffice.ComponentContext'.format(
HOST, PORT)
CMD = ['soffice',
'-env:SingleAppInstance=false',
'-env:UserInstallation=file:///tmp/LIBO_Process8100',
'--headless', '--norestore', '--nologo', '--accept={}'.format(ARG)]
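
    # ~ Reference sketch (assumption): the command above is essentially the same
    # ~ as launching LibreOffice by hand with
    # ~     soffice --headless --norestore --nologo
    # ~         --accept="socket,host=localhost,port=8100;urp;StarOffice.ComponentContext"
    # ~ after which this class attaches to that UNO socket via UnoUrlResolver.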
CELL_STYLE = {
'EUR': 'euro',
}
def __init__(self):
self._app = None
self._start_office()
self._init_values()
def _init_values(self):
self._es_pre = False
self._ctx = None
self._sm = None
self._desktop = None
self._currency = MXN
self._total_cantidades = 0
if self.is_running:
ctx = uno.getComponentContext()
service = 'com.sun.star.bridge.UnoUrlResolver'
resolver = ctx.ServiceManager.createInstanceWithContext(service, ctx)
self._ctx = resolver.resolve('uno:{}'.format(self.ARG))
self._sm = self._ctx.ServiceManager
self._desktop = self._create_instance('com.sun.star.frame.Desktop')
return
def _create_instance(self, name, with_context=True):
if with_context:
instance = self._sm.createInstanceWithContext(name, self._ctx)
else:
instance = self._sm.createInstance(name)
return instance
@property
def is_running(self):
try:
s = socket.create_connection((self.HOST, self.PORT), 5.0)
s.close()
return True
except ConnectionRefusedError:
return False
def _start_office(self):
if self.is_running:
return
for i in range(3):
            self._app = subprocess.Popen(self.CMD,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(5)
if self.is_running:
break
return
def _set_properties(self, properties):
pl = []
for k, v in properties.items():
pv = PropertyValue()
pv.Name = k
pv.Value = v
pl.append(pv)
return tuple(pl)
def _doc_open(self, path, options):
options = self._set_properties(options)
path = self._path_url(path)
try:
doc = self._desktop.loadComponentFromURL(path, '_blank', 0, options)
return doc
except:
return None
def _path_url(self, path):
if path.startswith('file://'):
return path
return uno.systemPathToFileUrl(path)
def close(self):
if self.is_running:
if not self._desktop is None:
self._desktop.terminate()
if not self._app is None:
self._app.terminate()
return
def _read(self, path):
try:
return open(path, 'rb').read()
except:
return b''
def _clean(self):
self._sd.SearchRegularExpression = True
        self._sd.setSearchString(r"\{(\w.+)\}")
self._search.replaceAll(self._sd)
return
def _cancelado(self, cancel):
if not cancel:
pd = self._sheet.getDrawPage()
if pd.getCount():
pd.remove(pd.getByIndex(0))
return
def _set_search(self):
self._sheet = self._template.getSheets().getByIndex(0)
try:
self._search = self._sheet.getPrintAreas()[0]
except IndexError:
self._search = self._sheet.getRangeAddress()
self._search = self._sheet.getCellRangeByPosition(
self._search.StartColumn,
self._search.StartRow,
self._search.EndColumn,
self._search.EndRow
)
self._sd = self._sheet.createSearchDescriptor()
try:
self._sd.SearchCaseSensitive = False
except:
print ('SD', self._sd)
return
def _next_cell(self, cell):
col = cell.getCellAddress().Column
row = cell.getCellAddress().Row + 1
return self._sheet.getCellByPosition(col, row)
def _copy_cell(self, cell):
destino = self._next_cell(cell)
self._sheet.copyRange(destino.getCellAddress(), cell.getRangeAddress())
return destino
def _set_cell(self, k='', v=None, cell=None, value=False):
if k:
self._sd.setSearchString(k)
ranges = self._search.findAll(self._sd)
if ranges:
ranges = ranges.getRangeAddressesAsString().split(';')
for r in ranges:
for c in r.split(','):
cell = self._sheet.getCellRangeByName(c)
if v is None:
return cell
if cell.getImplementationName() == 'ScCellObj':
pattern = re.compile(k, re.IGNORECASE)
nv = pattern.sub(v, cell.getString())
if value:
cell.setValue(nv)
else:
cell.setString(nv)
return cell
if cell:
if cell.getImplementationName() == 'ScCellObj':
ca = cell.getCellAddress()
new_cell = self._sheet.getCellByPosition(ca.Column, ca.Row + 1)
if value:
new_cell.setValue(v)
else:
new_cell.setString(v)
return new_cell
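
    # ~ _set_cell supports three calling modes (summary of the logic above):
    # ~     _set_cell('{cfdi.folio}')           -> only finds and returns the cell
    # ~     _set_cell('{cfdi.folio}', '123')    -> replaces the placeholder in place
    # ~     _set_cell(v='123', cell=other_cell) -> writes one row below other_cell
    # ~ With value=True the data is stored with setValue() (numeric) instead of
    # ~ setString().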
def _comprobante(self, data):
for k, v in data.items():
if k.lower() in ('total', 'descuento', 'subtotal', 'totalgravado', 'totalexento'):
self._set_cell('{cfdi.%s}' % k, v, value=True)
else:
self._set_cell('{cfdi.%s}' % k, v)
return
def _informacion_global(self, data):
for k, v in data.items():
print(k, v)
self._set_cell('{cfdi.%s}' % k, v)
return
def _emisor(self, data):
for k, v in data.items():
self._set_cell('{emisor.%s}' % k, v)
return
def _receptor(self, data):
for k, v in data.items():
if k.lower() in ('salariobasecotapor', 'salariodiariointegrado'):
self._set_cell('{receptor.%s}' % k, v, value=True)
else:
self._set_cell('{receptor.%s}' % k, v)
return
def _copy_row(self, cell):
row = cell.getCellAddress().Row
source = self._sheet.getRows().getByIndex(row)
nc = self._next_cell(cell)
self._sheet.copyRange(nc.getCellAddress(), source.getRangeAddress())
return
def _clean_rows(self, row, count):
for i in range(count):
source = self._sheet.getRows().getByIndex(row + i)
source.clearContents(5)
return
def _copy_paste_rows(self, cell, count):
dispatch = self._create_instance('com.sun.star.frame.DispatchHelper')
row = cell.getCellAddress().Row
source = self._sheet.getRows().getByIndex(row)
self._template.getCurrentController().select(source)
frame = self._template.getCurrentController().getFrame()
dispatch.executeDispatch(frame, '.uno:Copy', '', 0, ())
target = self._sheet.getCellRangeByPosition(0, row + 1, 0, row + count)
self._template.getCurrentController().select(target)
dispatch.executeDispatch(frame, '.uno:Paste', '', 0, ())
return
def _get_style(self, cell):
if cell is None:
return ''
match = re.match(r"([a-z]+)([0-9]+)", cell.CellStyle, re.I)
if not match:
return ''
currency = self.CELL_STYLE.get(self._currency, 'peso')
return '{}{}'.format(currency, match.groups()[1])
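
    # ~ Example (based on CELL_STYLE above): a cell styled 'peso12' is remapped
    # ~ to 'euro12' when the document currency is EUR; any other currency keeps
    # ~ the 'peso' family of styles.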
def _conceptos(self, data, pakings):
first = True
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
count = len(data) - 1
for i, concepto in enumerate(data):
key = concepto.get('noidentificacion', '')
description = concepto['descripcion']
unidad = concepto['unidad']
cantidad = concepto['cantidad']
valor_unitario = concepto['valorunitario']
importe = concepto['importe']
descuento = concepto.get('descuento', '0.0')
if first:
first = False
cell_1 = self._set_cell('{noidentificacion}', key)
cell_2 = self._set_cell('{descripcion}', description)
cell_3 = self._set_cell('{unidad}', unidad)
cell_4 = self._set_cell('{cantidad}', cantidad, value=True)
cell_5 = self._set_cell('{valorunitario}', valor_unitario, value=True)
cell_6 = self._set_cell('{importe}', importe, value=True)
cell_7 = self._set_cell('{descuento}', descuento, value=True)
if pakings:
cell_8 = self._set_cell('{empaque}', pakings[i], value=True)
if len(data) > 1:
row = cell_1.getCellAddress().Row + 1
self._sheet.getRows().insertByIndex(row, count)
self._copy_paste_rows(cell_1, count)
row = cell_1.getCellAddress().Row
else:
col1.append((key,))
col2.append((description,))
col3.append((unidad,))
col4.append((float(cantidad),))
col5.append((float(valor_unitario),))
col6.append((float(importe),))
col7.append((float(descuento),))
if pakings:
col8.append((pakings[i],))
self._total_cantidades += float(cantidad)
if not count:
if not cell_5 is None:
cell_5.CellStyle = self._get_style(cell_5)
if not cell_6 is None:
cell_6.CellStyle = self._get_style(cell_6)
return
style_5 = self._get_style(cell_5)
style_6 = self._get_style(cell_6)
style_7 = self._get_style(cell_7)
style_8 = ''
if pakings:
style_8 = self._get_style(cell_8)
col = cell_1.getCellAddress().Column
target1 = self._sheet.getCellRangeByPosition(col, row+1, col, row+count)
col = cell_2.getCellAddress().Column
target2 = self._sheet.getCellRangeByPosition(col, row+1, col, row+count)
col = cell_3.getCellAddress().Column
target3 = self._sheet.getCellRangeByPosition(col, row+1, col, row+count)
col = cell_4.getCellAddress().Column
target4 = self._sheet.getCellRangeByPosition(col, row+1, col, row+count)
col = cell_5.getCellAddress().Column
target5 = self._sheet.getCellRangeByPosition(col, row+1, col, row+count)
col = cell_6.getCellAddress().Column
target6 = self._sheet.getCellRangeByPosition(col, row+1, col, row+count)
target7 = None
target8 = None
if not cell_7 is None:
col = cell_7.getCellAddress().Column
target7 = self._sheet.getCellRangeByPosition(col, row+1, col, row+count)
if pakings and cell_8:
col = cell_8.getCellAddress().Column
target8 = self._sheet.getCellRangeByPosition(col, row+1, col, row+count)
target1.setFormulaArray(tuple(col1))
target2.setDataArray(tuple(col2))
target3.setFormulaArray(tuple(col3))
target4.setDataArray(tuple(col4))
target5.setDataArray(tuple(col5))
target6.setDataArray(tuple(col6))
if not target7 is None:
target7.setDataArray(tuple(col7))
if not target8 is None:
target8.setDataArray(tuple(col8))
if style_5:
cell_5.CellStyle = style_5
target5.CellStyle = style_5
if style_6:
cell_6.CellStyle = style_6
target6.CellStyle = style_6
if style_7:
cell_7.CellStyle = style_7
target7.CellStyle = style_7
if style_8:
cell_8.CellStyle = style_8
target8.CellStyle = style_8
return
def _add_totales(self, data):
currency = data['moneda']
value = data['total']
cell_value = self._set_cell('{total}', value, value=True)
if cell_value is None:
return False
cell_value.CellStyle = currency
return True
def _totales(self, data):
cell_styles = {
'EUR': 'euro',
}
currency = data['moneda']
self._set_cell('{total_cantidades}', str(self._total_cantidades))
if self._pagos:
return
cell_title = self._set_cell('{subtotal.titulo}', 'SubTotal')
value = data['subtotal']
cell_value = self._set_cell('{subtotal}', value, value=True)
if not cell_value is None:
cell_value.CellStyle = cell_styles.get(currency, 'peso')
        #~ If the {total} field is found, the totals and the taxes are assumed
        #~ to be declared independently, one by one
if self._add_totales(data):
return
        #~ If it is not found, copy the cells downward from
        #~ {subtotal.titulo} and {subtotal}
#~ print (data['descuento'])
if 'descuento' in data:
self._copy_cell(cell_title)
self._copy_cell(cell_value)
cell_title = self._set_cell(v='Descuento', cell=cell_title)
value = data['descuento']
cell_value = self._set_cell(v=value, cell=cell_value, value=True)
cell_value.CellStyle = currency
for tax in data['traslados']:
self._copy_cell(cell_title)
self._copy_cell(cell_value)
cell_title = self._set_cell(v=tax[0], cell=cell_title)
cell_value = self._set_cell(v=tax[1], cell=cell_value, value=True)
cell_value.CellStyle = currency
for tax in data['retenciones']:
self._copy_cell(cell_title)
self._copy_cell(cell_value)
cell_title = self._set_cell(v=tax[0], cell=cell_title)
cell_value = self._set_cell(v=tax[1], cell=cell_value, value=True)
cell_value.CellStyle = currency
for tax in data['taxlocales']:
self._copy_cell(cell_title)
self._copy_cell(cell_value)
cell_title = self._set_cell(v=tax[0], cell=cell_title)
cell_value = self._set_cell(v=tax[1], cell=cell_value, value=True)
cell_value.CellStyle = currency
self._copy_cell(cell_title)
self._copy_cell(cell_value)
cell_title = self._set_cell(v='Total', cell=cell_title)
value = data['total']
cell_value = self._set_cell(v=value, cell=cell_value, value=True)
cell_value.CellStyle = currency
return
def _timbre(self, data):
if self._es_pre or self._is_ticket:
return
qr = data.pop('cbb')
for k, v in data.items():
self._set_cell('{timbre.%s}' % k, v)
pd = self._sheet.getDrawPage()
image = self._template.createInstance('com.sun.star.drawing.GraphicObjectShape')
gp = self._create_instance('com.sun.star.graphic.GraphicProvider')
pd.add(image)
instance = 'com.sun.star.io.SequenceInputStream'
stream = self._create_instance(instance)
stream.initialize((uno.ByteSequence(qr.getvalue()),))
properties = self._set_properties({'InputStream': stream})
image.Graphic = gp.queryGraphic(properties)
s = Size()
s.Width = 4000
s.Height = 4000
image.setSize(s)
image.Anchor = self._set_cell('{timbre.cbb}')
return
def _donataria(self, data):
if not data:
return
for k, v in data.items():
self._set_cell('{donataria.%s}' % k, v)
return
def _ine(self, data):
if not data:
return
for k, v in data.items():
self._set_cell('{ine.%s}' % k, v)
return
def _divisas(self, data):
if data:
for k, v in data.items():
self._set_cell(f'{{divisas.{k}}}', v)
return
def _leyendas(self, data):
if not data:
return
first = True
for row in data:
leyenda = row['textoLeyenda']
norma = row.get('norma', '')
disposicion = row.get('disposicionFiscal', '')
if first:
first = False
cell1 = self._set_cell('{textoLeyenda}', leyenda)
cell2 = self._set_cell('{norma}', norma)
cell3 = self._set_cell('{disposicionFiscal}', disposicion)
else:
row = cell1.CellAddress.Row + 1
self._sheet.getRows().insertByIndex(row, 1)
cell1 = self._set_cell(v=leyenda, cell=cell1)
cell2 = self._set_cell(v=norma, cell=cell2)
cell3 = self._set_cell(v=disposicion, cell=cell3)
return
def _carta_porte(self, data):
if not data:
return
# ~ print(data)
figuras = data.pop('figuras')
mercancias = data.pop('mercancias')
detalle = mercancias.pop('detalle')
mercancias = mercancias.pop('mercancias')
autotransporte = data.pop('autotransporte')
ubicaciones = data.pop('ubicaciones')
for k, v in data.items():
self._set_cell(f'{{cp.{k}}}', v)
for k, v in figuras.items():
self._set_cell(f'{{cp.{k}}}', v)
for k, v in autotransporte.items():
self._set_cell(f'{{cp.{k}}}', v)
for k, v in mercancias.items():
self._set_cell(f'{{cp.{k}}}', v)
first = True
count = len(ubicaciones) - 1
for i, ubicacion in enumerate(ubicaciones):
tipo = ubicacion['TipoUbicacion']
nombre = ubicacion['NombreRemitenteDestinatario']
rfc = ubicacion['RFCRemitenteDestinatario']
nombre_rfc = f"{nombre} ({rfc})"
fecha = ubicacion['FechaHoraSalidaLlegada']
domicilio = ubicacion['domicilio']
if first:
first = False
cell_1 = self._set_cell('{cp.TipoUbicacion}', tipo)
cell_2 = self._set_cell('{cp.NombreRemitenteDestinatario}', nombre)
cell_3 = self._set_cell('{cp.RFCRemitenteDestinatario}', rfc)
cell_4 = self._set_cell('{cp.FechaHoraSalidaLlegada}', fecha)
cell_5 = self._set_cell('{cp.Domicilio}', domicilio)
row = cell_1.CellAddress.Row + 1
self._sheet.getRows().insertByIndex(row, count)
self._copy_paste_rows(cell_1, count)
else:
cell_1 = self._set_cell(v=tipo, cell=cell_1)
cell_2 = self._set_cell(v=nombre, cell=cell_2)
cell_3 = self._set_cell(v=rfc, cell=cell_3)
cell_4 = self._set_cell(v=fecha, cell=cell_4)
cell_5 = self._set_cell(v=domicilio, cell=cell_5)
first = True
count = len(detalle) - 1
for i, mercancia in enumerate(detalle):
clave = mercancia['BienesTransp']
descripcion = mercancia['Descripcion']
unidad = mercancia['ClaveUnidad']
cantidad = mercancia['Cantidad']
peso = mercancia['PesoEnKg']
if first:
first = False
cell_1 = self._set_cell('{cp.BienesTransp}', clave)
cell_2 = self._set_cell('{cp.Descripcion}', descripcion)
cell_3 = self._set_cell('{cp.ClaveUnidad}', unidad)
cell_4 = self._set_cell('{cp.Cantidad}', cantidad)
cell_5 = self._set_cell('{cp.PesoEnKg}', peso)
if count > 0:
row = cell_1.CellAddress.Row + 1
self._sheet.getRows().insertByIndex(row, count)
self._copy_paste_rows(cell_1, count)
else:
cell_1 = self._set_cell(v=clave, cell=cell_1)
cell_2 = self._set_cell(v=descripcion, cell=cell_2)
cell_3 = self._set_cell(v=unidad, cell=cell_3)
cell_4 = self._set_cell(v=cantidad, cell=cell_4)
cell_5 = self._set_cell(v=peso, cell=cell_5)
return
def _nomina(self, data):
if not data:
return
percepciones = data.pop('percepciones', [])
deducciones = data.pop('deducciones', [])
otrospagos = data.pop('otrospagos', [])
incapacidades = data.pop('incapacidades', [])
for k, v in data.items():
if k.lower() in ('totalpercepciones', 'totaldeducciones',
'totalotrospagos', 'subsidiocausado'):
self._set_cell('{nomina.%s}' % k, v, value=True)
else:
self._set_cell('{nomina.%s}' % k, v)
count = len(percepciones)
if len(deducciones) > count:
count = len(deducciones)
count -= 1
first = True
separacion = {}
for r in percepciones:
if 'TotalPagado' in r:
separacion = r
continue
tipo = r.get('TipoPercepcion')
concepto = r.get('Concepto')
gravado = r.get('ImporteGravado')
exento = r.get('ImporteExento')
if first:
first = False
cell_1 = self._set_cell('{percepcion.TipoPercepcion}', tipo)
cell_2 = self._set_cell('{percepcion.Concepto}', concepto)
cell_3 = self._set_cell('{percepcion.ImporteGravado}', gravado, value=True)
cell_4 = self._set_cell('{percepcion.ImporteExento}', exento, value=True)
if count:
row = cell_1.getCellAddress().Row + 1
self._sheet.getRows().insertByIndex(row, count)
self._copy_paste_rows(cell_1, count)
self._clean_rows(row, count)
else:
cell_1 = self._set_cell(v=tipo, cell=cell_1)
cell_2 = self._set_cell(v=concepto, cell=cell_2)
cell_3 = self._set_cell(v=gravado, cell=cell_3, value=True)
cell_4 = self._set_cell(v=exento, cell=cell_4, value=True)
first = True
for r in deducciones:
tipo = r.get('TipoDeduccion')
concepto = r.get('Concepto')
importe = r.get('Importe')
if first:
first = False
cell_1 = self._set_cell('{deduccion.TipoDeduccion}', tipo)
cell_2 = self._set_cell('{deduccion.Concepto}', concepto)
cell_3 = self._set_cell('{deduccion.Importe}', importe, value=True)
else:
cell_1 = self._set_cell(v=tipo, cell=cell_1)
cell_2 = self._set_cell(v=concepto, cell=cell_2)
cell_3 = self._set_cell(v=importe, cell=cell_3, value=True)
count = len(otrospagos) - 1
first = True
for r in otrospagos:
tipo = r.get('TipoOtroPago')
concepto = r.get('Concepto')
importe = r.get('Importe')
if first:
first = False
cell_1 = self._set_cell('{otropago.TipoOtroPago}', tipo)
cell_2 = self._set_cell('{otropago.Concepto}', concepto)
cell_3 = self._set_cell('{otropago.Importe}', importe, value=True)
if count:
row = cell_1.getCellAddress().Row + 1
self._sheet.getRows().insertByIndex(row, count)
self._copy_paste_rows(cell_1, count)
self._clean_rows(row, count)
else:
cell_1 = self._set_cell(v=tipo, cell=cell_1)
cell_2 = self._set_cell(v=concepto, cell=cell_2)
cell_3 = self._set_cell(v=importe, cell=cell_3, value=True)
count = len(incapacidades) - 1
first = True
for r in incapacidades:
tipo = r.get('TipoIncapacidad')
days = r.get('DiasIncapacidad')
importe = r.get('ImporteMonetario')
if first:
first = False
cell_1 = self._set_cell('{incapacidad.TipoIncapacidad}', tipo)
cell_2 = self._set_cell('{incapacidad.DiasIncapacidad}', days)
cell_3 = self._set_cell('{incapacidad.ImporteMonetario}', importe, value=True)
# ~ if count:
# ~ row = cell_1.getCellAddress().Row + 1
# ~ self._sheet.getRows().insertByIndex(row, count)
# ~ self._copy_paste_rows(cell_1, count)
# ~ self._clean_rows(row, count)
# ~ else:
# ~ cell_1 = self._set_cell(v=tipo, cell=cell_1)
# ~ cell_2 = self._set_cell(v=concepto, cell=cell_2)
# ~ cell_3 = self._set_cell(v=importe, cell=cell_3, value=True)
return
def _cfdipays(self, data):
VERSION2 = '2.0'
version = data['Version']
related = data.pop('related', [])
for k, v in data.items():
if k.lower() in ('monto',):
self._set_cell('{pago.%s}' % k, v, value=True)
else:
self._set_cell('{pago.%s}' % k, v)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
count = len(related)
for i, doc in enumerate(related):
uuid = doc['IdDocumento'].upper()
serie = doc.get('Serie', '')
folio = doc['Folio']
metodo_pago = doc['MetodoDePagoDR']
moneda = doc['MonedaDR']
parcialidad = doc['NumParcialidad']
saldo_anterior = doc['ImpSaldoAnt']
importe_pagado = doc['ImpPagado']
saldo_insoluto = doc['ImpSaldoInsoluto']
if i == 0:
cell_1 = self._set_cell('{doc.uuid}', uuid)
cell_2 = self._set_cell('{doc.serie}', serie)
cell_3 = self._set_cell('{doc.folio}', folio)
if version != VERSION2:
cell_4 = self._set_cell('{doc.metodopago}', metodo_pago)
cell_5 = self._set_cell('{doc.moneda}', moneda)
cell_6 = self._set_cell('{doc.parcialidad}', parcialidad)
cell_7 = self._set_cell('{doc.saldoanterior}', saldo_anterior, value=True)
cell_8 = self._set_cell('{doc.importepagado}', importe_pagado, value=True)
cell_9 = self._set_cell('{doc.saldoinsoluto}', saldo_insoluto, value=True)
else:
col1.append((uuid,))
col2.append((serie,))
col3.append((folio,))
if version != VERSION2:
col4.append((metodo_pago,))
col5.append((moneda,))
col6.append((parcialidad,))
col7.append((float(saldo_anterior),))
col8.append((float(importe_pagado),))
col9.append((float(saldo_insoluto),))
if count == 1:
return
count -= 1
row1 = cell_1.getCellAddress().Row + 1
row2 = row1 + count - 1
self._sheet.getRows().insertByIndex(row1, count)
self._copy_paste_rows(cell_1, count)
# ~ style_7 = self._get_style(cell_7)
# ~ style_8 = self._get_style(cell_8)
# ~ style_9 = self._get_style(cell_9)
col = cell_1.getCellAddress().Column
target1 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
col = cell_2.getCellAddress().Column
target2 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
col = cell_3.getCellAddress().Column
target3 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
if version != VERSION2:
col = cell_4.getCellAddress().Column
target4 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
col = cell_5.getCellAddress().Column
target5 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
col = cell_6.getCellAddress().Column
target6 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
col = cell_7.getCellAddress().Column
target7 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
col = cell_8.getCellAddress().Column
target8 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
col = cell_9.getCellAddress().Column
target9 = self._sheet.getCellRangeByPosition(col, row1, col, row2)
target1.setFormulaArray(tuple(col1))
target2.setDataArray(tuple(col2))
target3.setFormulaArray(tuple(col3))
if version != VERSION2:
target4.setDataArray(tuple(col4))
target5.setDataArray(tuple(col5))
target6.setDataArray(tuple(col6))
target7.setDataArray(tuple(col7))
target8.setDataArray(tuple(col8))
target9.setDataArray(tuple(col9))
return
def _render(self, data):
self._set_search()
self._es_pre = data.pop('es_pre', False)
self._is_ticket = data.pop('is_ticket', False)
self._currency = data['totales']['moneda']
self._pagos = data.pop('pagos', False)
pakings = data.pop('pakings', [])
self._comprobante(data['comprobante'])
self._informacion_global(data.get('informacion_global', {}))
self._emisor(data['emisor'])
self._receptor(data['receptor'])
self._conceptos(data['conceptos'], pakings)
if self._pagos:
self._cfdipays(data['pays'])
if 'nomina' in data and data['nomina']:
self._nomina(data['nomina'])
else:
self._totales(data['totales'])
self._donataria(data['donataria'])
self._ine(data['ine'])
self._divisas(data.get('divisas', {}))
self._leyendas(data.get('leyendas', ''))
self._carta_porte(data.get('carta_porte', {}))
self._timbre(data['timbre'])
self._cancelado(data['cancelada'])
self._others_values(data)
self._clean()
return
def _others_values(self, data):
el_version = data.get('el.version', '')
if el_version:
self._set_cell('{el.version}', el_version)
return
def pdf(self, path, data, ods=False):
options = {'AsTemplate': True, 'Hidden': True}
log.debug('Abrir plantilla...')
self._template = self._doc_open(path, options)
if self._template is None:
return b''
self._template.setPrinter(self._set_properties({'PaperFormat': LETTER}))
self._render(data)
path_ods = get_path_temp('.ods')
self._template.storeToURL(self._path_url(path_ods), ())
if ods:
data = self._read(path_ods)
_kill(path_ods)
return data
options = {'FilterName': 'calc_pdf_Export'}
path_pdf = get_path_temp('.pdf')
self._template.storeToURL(self._path_url(path_pdf), self._set_properties(options))
try:
self._template.close(True)
except:
pass
data = self._read(path_pdf)
_kill(path_ods)
_kill(path_pdf)
return data
def _get_data(self, doc, name=0):
try:
sheet = doc.getSheets()[name]
cursor = sheet.createCursorByRange(sheet['A1'])
cursor.collapseToCurrentRegion()
except KeyError:
msg = 'Hoja no existe'
return (), msg
return cursor.getDataArray(), ''
def products(self, path):
options = {'AsTemplate': True, 'Hidden': True}
doc = self._doc_open(path, options)
if doc is None:
return (), 'No se pudo abrir la plantilla'
data, msg = self._get_data(doc)
doc.close(True)
if len(data) == 1:
msg = 'Sin datos para importar'
return (), msg
fields = (
'categoria',
'clave',
'clave_sat',
'descripcion',
'unidad',
'valor_unitario',
'inventario',
'existencia',
'codigo_barras',
'impuestos',
)
rows = [dict(zip(fields, r)) for r in data[1:]]
return rows, ''
def employees(self, path):
options = {'AsTemplate': True, 'Hidden': True}
doc = self._doc_open(path, options)
if doc is None:
            return (), 'No se pudo abrir la plantilla'
data, msg = self._get_data(doc, 'Empleados')
doc.close(True)
if len(data) == 1:
msg = 'Sin datos para importar'
return (), msg
fields = (
'num_empleado',
'rfc',
'curp',
'nombre',
'paterno',
'materno',
'fecha_ingreso',
'imss',
'tipo_contrato',
'es_sindicalizado',
'tipo_jornada',
'tipo_regimen',
'departamento',
'puesto',
'riesgo_puesto',
'periodicidad_pago',
'banco',
'cuenta_bancaria',
'clabe',
'salario_base',
'salario_diario',
'estado',
'codigo_postal',
'notas',
'correo',
'regimen_fiscal',
)
rows = tuple([dict(zip(fields, r)) for r in data[1:]])
msg = 'Empleados importados correctamente'
return rows, msg
def _get_nomina(self, doc):
rows, msg = self._get_data(doc, 'Nomina')
if len(rows) == 2:
msg = 'Sin datos para importar'
return {}, msg
fields = (
'rfc',
'tipo_nomina',
'fecha_pago',
'fecha_inicial_pago',
'fecha_final_pago',
'relacionados',
'dias_pagados',
)
data = tuple([dict(zip(fields, r[1:])) for r in rows[2:]])
return data, ''
def _get_percepciones(self, doc, count):
rows, msg = self._get_data(doc, 'Percepciones')
if len(rows) == 2:
msg = 'Sin Percepciones'
return {}, msg
if len(rows[0][2:]) % 2:
msg = 'Las Percepciones deben ir en pares: Gravado y Exento'
return {}, msg
data = tuple([r[2:] for r in rows[:count+2]])
return data, ''
def _get_deducciones(self, doc, count):
rows, msg = self._get_data(doc, 'Deducciones')
if len(rows) == 2:
msg = 'Sin Deducciones'
            return {}, {}, msg
data = tuple([r[2:] for r in rows[:count+2]])
sheet = doc.Sheets['Deducciones']
notes = sheet.getAnnotations()
new_titles = {}
for n in notes:
col = n.getPosition().Column - 2
if data[0][col] == '004':
new_titles[col] = n.getString()
return data, new_titles, ''
def _get_otros_pagos(self, doc, count):
rows, msg = self._get_data(doc, 'OtrosPagos')
if len(rows) == 2:
msg = 'Sin Otros Pagos'
return {}, msg
data = tuple([r[2:] for r in rows[:count+2]])
return data, ''
def _get_separacion(self, doc, count):
rows, msg = self._get_data(doc, 'Separacion')
if len(rows) == 2:
msg = 'Sin Separacion'
return {}, msg
data = tuple([r[1:] for r in rows[:count+2]])
return data, ''
def _get_horas_extras(self, doc, count):
rows, msg = self._get_data(doc, 'HorasExtras')
if len(rows) == 2:
msg = 'Sin Horas Extras'
return {}, msg
if len(rows[1][1:]) % 4:
            msg = 'Las Horas Extras deben ir en grupos de 4 columnas'
return {}, msg
data = tuple([r[1:] for r in rows[:count+2]])
return data, ''
def _get_incapacidades(self, doc, count):
rows, msg = self._get_data(doc, 'Incapacidades')
if len(rows) == 2:
msg = 'Sin Incapacidades'
return {}, msg
if len(rows[1][1:]) % 3:
            msg = 'Las Incapacidades deben ir en grupos de 3 columnas'
return {}, msg
data = tuple([r[1:] for r in rows[:count+2]])
return data, ''
def nomina(self, path):
options = {'AsTemplate': True, 'Hidden': True}
doc = self._doc_open(path, options)
if doc is None:
msg = 'No se pudo abrir la plantilla'
return {}, msg
data = {}
nomina, msg = self._get_nomina(doc)
if msg:
doc.close(True)
return {}, msg
percepciones, msg = self._get_percepciones(doc, len(nomina))
if msg:
doc.close(True)
return {}, msg
deducciones, new_titles, msg = self._get_deducciones(doc, len(nomina))
if msg:
doc.close(True)
return {}, msg
otros_pagos, msg = self._get_otros_pagos(doc, len(nomina))
if msg:
doc.close(True)
return {}, msg
separacion, msg = self._get_separacion(doc, len(nomina))
if msg:
doc.close(True)
return {}, msg
horas_extras, msg = self._get_horas_extras(doc, len(nomina))
if msg:
doc.close(True)
return {}, msg
incapacidades, msg = self._get_incapacidades(doc, len(nomina))
if msg:
doc.close(True)
return {}, msg
doc.close(True)
rows = len(nomina) + 2
if rows != len(percepciones):
msg = 'Cantidad de filas incorrecta en: Percepciones'
return {}, msg
if rows != len(deducciones):
msg = 'Cantidad de filas incorrecta en: Deducciones'
return {}, msg
if rows != len(otros_pagos):
msg = 'Cantidad de filas incorrecta en: Otros Pagos'
return {}, msg
if rows != len(separacion):
msg = 'Cantidad de filas incorrecta en: Separación'
return {}, msg
if rows != len(horas_extras):
msg = 'Cantidad de filas incorrecta en: Horas Extras'
return {}, msg
if rows != len(incapacidades):
msg = 'Cantidad de filas incorrecta en: Incapacidades'
return {}, msg
data['nomina'] = nomina
data['percepciones'] = percepciones
data['deducciones'] = deducciones
data['otros_pagos'] = otros_pagos
data['separacion'] = separacion
data['horas_extras'] = horas_extras
data['incapacidades'] = incapacidades
data['new_titles'] = new_titles
return data, ''
def invoice(self, path):
options = {'AsTemplate': True, 'Hidden': True}
doc = self._doc_open(path, options)
if doc is None:
return (), 'No se pudo abrir la plantilla'
data, msg = self._get_data(doc)
doc.close(True)
if len(data) == 1:
msg = 'Sin datos para importar'
return (), msg
rows = tuple(data[1:])
return rows, ''
def to_pdf(data, emisor_rfc, ods=False, pdf_from='1'):
rfc = data['emisor']['rfc']
if DEBUG:
rfc = emisor_rfc
version = data['comprobante']['version']
default = f'plantilla_factura_{version}.ods'
if pdf_from == '2':
return to_pdf_from_json(rfc, version, data)
if 'nomina' in data and data['nomina']:
version_nomina = data['nomina']['version']
default = f'plantilla_nomina_{version}_{version_nomina}.ods'
version = f'{version}_cn_{version_nomina}'
if 'carta_porte' in data:
default = 'plantilla_factura_ccp.ods'
version = '{}_ccp_{}'.format(version, data['carta_porte']['version'])
if data.get('pagos', False):
version_pagos = data['pays']['version']
default = f'plantilla_pagos_{version}_{version_pagos}.ods'
version = f'{version}_cp_{version_pagos}'
if data['donativo']:
version_donatarias = data['donataria']['version']
default = f'plantilla_donatarias_{version}_{version_donatarias}.ods'
version = f'{version}_cd_{version_donatarias}'
template_name = f'{rfc.lower()}_{version}.ods'
# ~ print('T', template_name, default)
if APP_LIBO:
app = LIBO()
if app.is_running:
path = get_template_ods(template_name, default)
if path:
return app.pdf(path, data, ods)
return to_pdf_from_json(rfc, version, data)
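
# ~ Template resolution summary for to_pdf: a company specific file named
# ~ '<rfc>_<version>.ods' is searched first in media/templates, then the generic
# ~ default (plantilla_factura, plantilla_nomina, plantilla_pagos, carta porte or
# ~ donatarias variant); if LibreOffice is unavailable or no ODS template exists,
# ~ rendering falls back to the JSON driven TemplateInvoice below.
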
def to_pdf_from_json(rfc, version, data):
rfc = rfc.lower()
name = '{}_{}.json'.format(rfc, version)
print('name', name)
custom_styles = get_custom_styles(name)
path_logo = _join(PATHS['LOGOS'], f"{rfc}.png")
if exists(path_logo):
data['emisor']['logo'] = path_logo
path_logo = _join(PATHS['LOGOS'], f"{rfc}_2.png")
if exists(path_logo):
data['emisor']['logo2'] = path_logo
buffer = io.BytesIO()
pdf = TemplateInvoice(buffer)
pdf.custom_styles = custom_styles
pdf.data = data
pdf.render()
return buffer.getvalue()
def format_currency(value, currency, digits=2):
c = {
MXN: '$',
'USD': '$',
'EUR': '',
}
s = c.get(currency, MXN)
return f'{s} {float(value):,.{digits}f}'
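
# ~ Examples (illustrative):
# ~     format_currency(1234.5, 'USD')     -> '$ 1,234.50'
# ~     format_currency(1234.5, 'EUR', 3)  -> ' 1,234.500'
# ~ Any other currency uses the MXN constant from settings as its prefix.
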
def to_html(data):
name = f"{data['rfc']}_{data['version']}.html"
try:
template = template_lookup.get_template(name)
except TopLevelLookupException:
template = template_lookup.get_template('plantilla_factura.html')
data['rfc'] = 'invoice'
# ~ data['cfdi_sello'] = textwrap.fill(data['cfdi_sello'], 50)
# ~ data['timbre_sellosat'] = textwrap.fill(data['timbre_sellosat'], 110)
# ~ data['timbre_cadenaoriginal'] = textwrap.fill(data['timbre_cadenaoriginal'], 140)
# ~ data['cfdi_sello'] = 'X'*100 + '<BR>' + 'X'*100 + '<BR>' + 'X'*100
# ~ data['timbre_sellosat'] = 'X'*100 + '<BR>' + 'X'*100 + '<BR>' + 'X'*100
return template.render(**data)
def html_to_pdf(data):
path_pdf = '/home/mau/test.pdf'
css = '/home/mau/projects/empresa-libre/source/static/css/invoice.css'
# ~ font_config = FontConfiguration()
# ~ html = HTML(string=data)
# ~ css = CSS(filename=path_css)
# ~ html.write_pdf(path_pdf, stylesheets=[css], font_config=font_config)
options = {
'page-size': 'Letter',
'margin-top': '0.50in',
'margin-right': '0.50in',
'margin-bottom': '0.50in',
'margin-left': '0.50in',
'encoding': "UTF-8",
}
    # assumption: pdfkit is available; it is not imported at module level in this section
    import pdfkit
    pdfkit.from_string(data.decode(), path_pdf, options=options, css=css)
return
def import_employees(rfc):
msg = 'No se pudo cargar el archivo'
name = '{}_employees.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'tmp', name)
if not is_file(path):
return (), msg
msg = 'LibreOffice no se pudo iniciar'
if APP_LIBO:
app = LIBO()
if app.is_running:
return app.employees(path)
return (), msg
def import_nomina(rfc):
    msg = 'No se pudo cargar el archivo'
    name = '{}_nomina.ods'.format(rfc.lower())
    path = _join(PATH_MEDIA, 'tmp', name)
    if not is_file(path):
        return {}, msg

    msg = 'LibreOffice no se pudo iniciar'
    if APP_LIBO:
        app = LIBO()
        if app.is_running:
            return app.nomina(path)

    return {}, msg
def parse_xml(xml):
try:
return ET.fromstring(xml)
except ET.ParseError:
return None
def to_pretty_xml(xml):
tree = parseString(xml)
return tree.toprettyxml(encoding='utf-8').decode('utf-8')
def get_dict(data):
return CaseInsensitiveDict(data)
def to_letters(value, currency):
return NumLet(value, currency).letras
# ~ def get_qr(data, p=True):
# ~ qr = pyqrcode.create(data, mode='binary')
# ~ if p:
# ~ path = get_path_temp('.qr')
# ~ qr.png(path, scale=7)
# ~ return path
# ~ buffer = io.BytesIO()
# ~ qr.png(buffer, scale=8)
# ~ return base64.b64encode(buffer.getvalue()).decode()
# ~ def get_qr2(data, kind='svg'):
# ~ buffer = io.BytesIO()
# ~ segno.make(data).save(buffer, kind=kind, scale=8, border=2)
# ~ return buffer
def _get_relacionados(doc, version):
node = doc.find('{}CfdiRelacionados'.format(PRE[version]))
if node is None:
return ''
uuids = ['UUID: {}'.format(n.attrib['UUID']) for n in list(node)]
return '\n'.join(uuids)
def _comprobante(doc, options):
data = CaseInsensitiveDict(doc.attrib.copy())
del data['certificado']
serie = ''
if 'serie' in data:
serie = '{}-'.format(data['serie'])
data['seriefolio'] = '{}{}'.format(serie, data.get('folio', ''))
data['totalenletras'] = to_letters(float(data['total']), data['moneda'])
is_nomina = options.get('is_nomina', False)
if is_nomina:
data['formadepago'] = options['formadepago']
data['periodicidaddepago'] = options['periodicidaddepago']
data['tiporelacion'] = options.get('tiporelacion', '')
return data
if data['version'] in CFDI_VERSIONS:
tipos = {
'I': 'ingreso',
'E': 'egreso',
'T': 'traslado',
'P': 'pago',
}
data['tipodecomprobante'] = tipos.get(data['tipodecomprobante'])
data['lugarexpedicion'] = \
'C.P. de Expedición: {}'.format(data['lugarexpedicion'])
if 'metododepago' in options:
data['metododepago'] = options['metododepago']
if 'formadepago' in options:
data['formadepago'] = options['formadepago']
if 'condicionesdepago' in data:
data['condicionesdepago'] = \
'Condiciones de pago: {}'.format(data['condicionesdepago'])
data['moneda'] = options['moneda']
data['tiporelacion'] = options.get('tiporelacion', '')
data['relacionados'] = _get_relacionados(doc, data['version'])
else:
fields = {
'formaDePago': 'Forma de Pago: {}\n',
'metodoDePago': 'Método de pago: {}\n',
'condicionesDePago': 'Condiciones de Pago: {}\n',
'NumCtaPago': 'Número de Cuenta de Pago: {}\n',
'Moneda': 'Moneda: {}\n',
'TipoCambio': 'Tipo de Cambio: {}',
}
datos = ''
for k, v in fields.items():
if k in data:
datos += v.format(data[k])
data['datos'] = datos
data['hora'] = data['fecha'].split('T')[1]
fecha = parser.parse(data['fecha'])
try:
locale.setlocale(locale.LC_TIME, "es_MX.UTF-8")
except:
pass
data['fechaformato'] = fecha.strftime('%A, %d de %B de %Y')
if 'tipocambio' in data:
data['tipocambio'] = 'Tipo de Cambio: $ {:0.4f}'.format(
float(data['tipocambio']))
data['notas'] = options['notas']
return data
def _emisor(doc, version, values):
emisor = doc.find('{}Emisor'.format(PRE[version]))
data = CaseInsensitiveDict(emisor.attrib.copy())
node = emisor.find('{}DomicilioFiscal'.format(PRE[version]))
if not node is None:
data.update(CaseInsensitiveDict(node.attrib.copy()))
2017-11-09 23:51:54 -06:00
if version == '3.2':
node = emisor.find('{}RegimenFiscal'.format(PRE[version]))
if not node is None:
data['regimenfiscal'] = node.attrib['Regimen']
2017-12-06 16:38:42 -06:00
data['regimen'] = node.attrib['Regimen']
2017-11-09 23:51:54 -06:00
else:
data['regimenfiscal'] = values['regimenfiscal']
2017-10-24 00:03:07 -05:00
path = _join(PATH_MEDIA, 'logos', '{}.png'.format(data['rfc'].lower()))
if is_file(path):
data['logo'] = path
2017-10-15 17:20:20 -05:00
return data
def _receptor(doc, version, values):
node = doc.find('{}Receptor'.format(PRE[version]))
data = CaseInsensitiveDict(node.attrib.copy())
node = node.find('{}Domicilio'.format(PRE[version]))
if not node is None:
data.update(node.attrib.copy())
2017-11-09 23:51:54 -06:00
if version == '3.2':
return data
2017-10-15 17:20:20 -05:00
data['usocfdi'] = values['usocfdi']
2022-05-30 13:30:47 -05:00
# ~ data.update(values['receptor'])
2017-10-15 17:20:20 -05:00
return data
2018-02-02 13:46:15 -06:00
def _conceptos(doc, version, options):
is_nomina = options.get('is_nomina', False)
2017-10-15 17:20:20 -05:00
data = []
conceptos = doc.find('{}Conceptos'.format(PRE[version]))
2020-12-16 16:04:32 -06:00
# ~ for c in conceptos.getchildren():
for c in list(conceptos):
2017-10-15 17:20:20 -05:00
values = CaseInsensitiveDict(c.attrib.copy())
2018-02-02 13:46:15 -06:00
if is_nomina:
values['noidentificacion'] = values['ClaveProdServ']
values['unidad'] = values['ClaveUnidad']
data.append(values)
continue
2022-03-10 20:29:50 -06:00
if version in CFDI_VERSIONS:
2018-02-07 00:48:06 -06:00
if 'noidentificacion' in values:
values['noidentificacion'] = '{}\n(SAT {})'.format(
values['noidentificacion'], values['ClaveProdServ'])
else:
values['noidentificacion'] = 'SAT {}'.format(
values['ClaveProdServ'])
if 'unidad' in values:
values['unidad'] = '({})\n{}'.format(
values['ClaveUnidad'], values['unidad'])
else:
values['unidad'] = '{}'.format(values['ClaveUnidad'])
2018-01-08 16:00:55 -06:00
n = c.find('{}CuentaPredial'.format(PRE[version]))
if n is not None:
v = CaseInsensitiveDict(n.attrib.copy())
info = '\nCuenta Predial Número: {}'.format(v['numero'])
values['descripcion'] += info
2018-01-08 16:00:55 -06:00
n = c.find('{}InformacionAduanera'.format(PRE[version]))
if n is not None:
v = CaseInsensitiveDict(n.attrib.copy())
info = '\nNúmero Pedimento: {}'.format(v['numeropedimento'])
values['descripcion'] += info
2018-01-29 13:21:06 -06:00
n = c.find('{}ComplementoConcepto'.format(PRE[version]))
if n is not None:
v = CaseInsensitiveDict(n[0].attrib.copy())
info = '\nAlumno: {} (CURP: {})\nNivel: {}, Autorización: {}'.format(
v['nombreAlumno'], v['CURP'], v['nivelEducativo'], v['autRVOE'])
values['descripcion'] += info
2017-10-15 17:20:20 -05:00
data.append(values)
return data
def _totales(doc, cfdi, version):
data = {}
data['moneda'] = doc.attrib['Moneda']
data['subtotal'] = cfdi['subtotal']
if 'descuento' in cfdi:
data['descuento'] = cfdi['descuento']
data['total'] = cfdi['total']
tn = {
'001': 'ISR',
'002': 'IVA',
2017-10-16 23:09:26 -05:00
'003': 'IEPS',
2017-10-15 17:20:20 -05:00
}
traslados = []
retenciones = []
taxlocales = []
imp = doc.find('{}Impuestos'.format(PRE[version]))
if imp is not None:
tmp = CaseInsensitiveDict(imp.attrib.copy())
for k, v in tmp.items():
data[k] = v
node = imp.find('{}Traslados'.format(PRE[version]))
if node is not None:
2020-12-16 16:04:32 -06:00
# ~ for n in node.getchildren():
for n in list(node):
2017-10-15 17:20:20 -05:00
tmp = CaseInsensitiveDict(n.attrib.copy())
2022-03-10 20:29:50 -06:00
if version in CFDI_VERSIONS:
2023-02-16 23:09:35 -06:00
tasa = ''
if 'tasaocuota' in tmp:
tasa = round(float(tmp['tasaocuota']), DECIMALES)
2018-02-07 00:48:06 -06:00
title = 'Traslado {} {}'.format(tn.get(tmp['impuesto']), tasa)
2017-10-15 17:20:20 -05:00
else:
title = 'Traslado {} {}'.format(tmp['impuesto'], tmp['tasa'])
2023-02-16 23:09:35 -06:00
if 'importe' in tmp:
traslados.append((title, float(tmp['importe'])))
2017-10-15 17:20:20 -05:00
node = imp.find('{}Retenciones'.format(PRE[version]))
if node is not None:
2020-12-16 16:04:32 -06:00
# ~ for n in node.getchildren():
for n in list(node):
2017-10-15 17:20:20 -05:00
tmp = CaseInsensitiveDict(n.attrib.copy())
2022-03-10 20:29:50 -06:00
if version in CFDI_VERSIONS:
2017-10-15 17:20:20 -05:00
title = 'Retención {} {}'.format(
tn.get(tmp['impuesto']), '')
else:
title = 'Retención {} {}'.format(tmp['impuesto'], '')
retenciones.append((title, float(tmp['importe'])))
2017-11-29 23:57:31 -06:00
node = doc.find('{}Complemento/{}ImpuestosLocales'.format(
PRE[version], PRE['LOCALES']))
if node is not None:
for otro in list(node):
if otro.tag == '{}RetencionesLocales'.format(PRE['LOCALES']):
tipo = 'Retención '
name = 'ImpLocRetenido'
tasa = 'TasadeRetencion'
else:
tipo = 'Traslado '
name = 'ImpLocTrasladado'
tasa = 'TasadeTraslado'
title = '{} {} {}%'.format(
tipo, otro.attrib[name], otro.attrib[tasa])
importe = float(otro.attrib['Importe'])
taxlocales.append((title, importe))
2017-10-15 17:20:20 -05:00
data['traslados'] = traslados
data['retenciones'] = retenciones
data['taxlocales'] = taxlocales
return data
2021-06-29 19:07:55 -05:00
def _timbre(doc, version, values, pdf_from='1'):
2017-10-15 02:30:55 -05:00
CADENA = '||{version}|{UUID}|{FechaTimbrado}|{selloCFD}|{noCertificadoSAT}||'
2022-03-10 20:29:50 -06:00
if version in CFDI_VERSIONS:
2017-10-15 02:30:55 -05:00
CADENA = '||{Version}|{UUID}|{FechaTimbrado}|{SelloCFD}|{NoCertificadoSAT}||'
node = doc.find('{}Complemento/{}TimbreFiscalDigital'.format(
PRE[version], PRE['TIMBRE']))
data = CaseInsensitiveDict(node.attrib.copy())
2018-02-13 11:42:38 -06:00
qr_data = {
'url': 'https://verificacfdi.facturaelectronica.sat.gob.mx/default.aspx?',
'uuid': '&id={}'.format(data['uuid']),
'emisor': '&re={}'.format(values['rfc_emisor']),
'receptor': '&rr={}'.format(values['rfc_receptor']),
'total': '&tt={}'.format(values['total']),
'sello': '&fe={}'.format(data['sellocfd'][-8:]),
}
qr_data = '{url}{uuid}{emisor}{receptor}{total}{sello}'.format(**qr_data)
2018-11-20 00:03:07 -06:00
data['cbb'] = get_qr(qr_data, 'png')
# ~ if pdf_from == '1':
# ~ data['cbb'] = get_qr(qr_data, 'png')
# ~ else:
# ~ data['cbb'] = get_qr(qr_data)
2021-06-29 19:07:55 -05:00
2018-02-13 11:42:38 -06:00
data['cadenaoriginal'] = CADENA.format(**data)
2017-10-15 02:30:55 -05:00
return data
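
# Usage sketch for _timbre (kept commented; illustrative values only). The keys
# of 'options' mirror the dict built in get_data_from_xml below; the RFCs and
# total are placeholders, not real data.
# ~ doc = parse_xml(xml_signed)
# ~ options = {
# ~     'rfc_emisor': 'AAA010101AAA',
# ~     'rfc_receptor': 'XAXX010101000',
# ~     'total': '116.00',
# ~ }
# ~ timbre = _timbre(doc, '3.3', options)
# ~ # timbre['cbb'] is the QR (PNG) with the SAT verification URL and
# ~ # timbre['cadenaoriginal'] the '||Version|UUID|...||' string.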
2017-11-25 20:32:36 -06:00
def _donataria(doc, version, fechadof):
2017-11-25 20:26:15 -06:00
node = doc.find('{}Complemento/{}Donatarias'.format(
PRE[version], PRE['DONATARIA']))
if node is None:
return {}
data = CaseInsensitiveDict(node.attrib.copy())
2017-11-25 20:32:36 -06:00
data['fechadof'] = fechadof
2017-11-25 20:26:15 -06:00
return data
2017-11-26 00:15:14 -06:00
def _ine(doc, version):
node = doc.find('{}Complemento/{}INE'.format(PRE[version], PRE['INE']))
if node is None:
return {}
values = (
        ('TipoComite', 'Tipo de Comité: {}'),
('TipoProceso', 'Tipo de Proceso: {}'),
('IdContabilidad', 'ID de Contabilidad: {}'),
)
data = CaseInsensitiveDict(node.attrib.copy())
for k, v in values:
2021-05-31 12:59:45 -05:00
if k in data:
data[k] = v.format(data[k])
try:
node = node[0]
attr = CaseInsensitiveDict(node.attrib.copy())
values = (
('ClaveEntidad', 'Clave de la Entidad: {}'),
('Ambito', 'Ámbito: {}'),
)
for k, v in values:
if k in attr:
data[k] = v.format(attr[k])
node = node[0]
attr = CaseInsensitiveDict(node.attrib.copy())
values = (
('IdContabilidad', 'ID de Contabilidad: {}'),
)
for k, v in values:
if k in attr:
data[k] = v.format(attr[k])
except Exception as e:
print(e)
2017-11-26 00:15:14 -06:00
return data
2018-02-03 01:23:05 -06:00
def _nomina(doc, data, values, version_cfdi):
2018-02-02 13:46:15 -06:00
is_nomina = values.get('is_nomina', False)
if not is_nomina:
return {}
2018-02-03 01:23:05 -06:00
version = values['version']
node_nomina = doc.find('{}Complemento/{}Nomina'.format(
PRE[version_cfdi], PRE['NOMINA'][version]))
if node_nomina is None:
return {}
info = CaseInsensitiveDict(node_nomina.attrib.copy())
node = node_nomina.find('{}Emisor'.format(PRE['NOMINA'][version]))
2018-02-08 17:58:24 -06:00
if not node is None:
data['emisor'].update(CaseInsensitiveDict(node.attrib.copy()))
2018-02-03 01:23:05 -06:00
node = node_nomina.find('{}Receptor'.format(PRE['NOMINA'][version]))
data['receptor'].update(CaseInsensitiveDict(node.attrib.copy()))
node = node_nomina.find('{}Percepciones'.format(PRE['NOMINA'][version]))
if not node is None:
data['comprobante'].update(CaseInsensitiveDict(node.attrib.copy()))
info['percepciones'] = []
2020-12-31 20:17:00 -06:00
for p in list(node):
2018-02-03 01:23:05 -06:00
info['percepciones'].append(CaseInsensitiveDict(p.attrib.copy()))
node = node_nomina.find('{}Deducciones'.format(PRE['NOMINA'][version]))
if not node is None:
data['comprobante'].update(CaseInsensitiveDict(node.attrib.copy()))
info['deducciones'] = []
2020-12-31 20:17:00 -06:00
for d in list(node):
2018-02-03 01:23:05 -06:00
info['deducciones'].append(CaseInsensitiveDict(d.attrib.copy()))
node = node_nomina.find('{}OtrosPagos'.format(PRE['NOMINA'][version]))
if not node is None:
info['otrospagos'] = []
2020-12-31 20:17:00 -06:00
for o in list(node):
2018-02-03 01:23:05 -06:00
info['otrospagos'].append(CaseInsensitiveDict(o.attrib.copy()))
n = o.find('{}SubsidioAlEmpleo'.format(PRE['NOMINA'][version]))
if not n is None:
info.update(CaseInsensitiveDict(n.attrib.copy()))
2018-02-16 22:16:46 -06:00
node = node_nomina.find('{}Incapacidades'.format(PRE['NOMINA'][version]))
if not node is None:
info['incapacidades'] = []
2020-12-31 20:17:00 -06:00
for i in list(node):
2018-02-16 22:16:46 -06:00
info['incapacidades'].append(CaseInsensitiveDict(i.attrib.copy()))
2018-02-03 01:23:05 -06:00
return info
2018-02-02 13:46:15 -06:00
2023-01-02 22:59:39 -06:00
def _get_info_pays_2(node):
pre_pays = PRE_DEFAULT['PAGOS']['PRE']
data = CaseInsensitiveDict(node.attrib.copy())
path = f"{pre_pays}Totales"
totales = node.find(path)
data.update(CaseInsensitiveDict(totales.attrib.copy()))
path = f"{pre_pays}Pago"
node_pay = node.find(path)
data.update(CaseInsensitiveDict(node_pay.attrib.copy()))
related = []
for n in node_pay:
attr = CaseInsensitiveDict(n.attrib.copy())
if attr:
attr['metododepagodr'] = ''
related.append(attr)
data['related'] = related
return data
2018-08-30 19:24:33 -05:00
def _cfdipays(doc, data, version):
2023-02-18 12:40:05 -06:00
pre_pays = PRE_DEFAULT['PAGOS']['PRE']
path = f"{PRE[version]}Complemento/{pre_pays}Pagos"
node = doc.find(path)
if node is None:
pre_pays = PRE['PAGOS']['1.0']
2023-01-02 22:59:39 -06:00
path = f"{PRE[version]}Complemento/{pre_pays}Pagos"
node = doc.find(path)
2023-01-01 22:58:58 -06:00
2018-08-30 19:24:33 -05:00
if node is None:
2023-02-18 12:40:05 -06:00
log.error('Node pays not found...')
2018-08-30 19:24:33 -05:00
return {}
2023-01-02 22:59:39 -06:00
if version == '4.0':
info = _get_info_pays_2(node)
else:
info = CaseInsensitiveDict(node.attrib.copy())
related = []
for n1 in node:
info.update(CaseInsensitiveDict(n1.attrib.copy()))
for n2 in n1:
related.append(CaseInsensitiveDict(n2.attrib.copy()))
info['related'] = related
2018-08-30 19:24:33 -05:00
data['comprobante']['totalenletras'] = to_letters(
float(info['monto']), info['monedap'])
data['comprobante']['moneda'] = info['monedap']
return info
2021-06-29 19:07:55 -05:00
def get_data_from_xml(invoice, values, pdf_from='1'):
2018-02-02 13:46:15 -06:00
data = {'cancelada': invoice.cancelada, 'donativo': False}
2018-02-07 00:48:06 -06:00
if hasattr(invoice, 'donativo'):
2018-02-02 13:46:15 -06:00
data['donativo'] = invoice.donativo
2022-05-30 13:30:47 -05:00
2017-10-15 02:30:55 -05:00
doc = parse_xml(invoice.xml)
2017-11-11 15:03:20 -06:00
data['comprobante'] = _comprobante(doc, values)
2017-10-15 02:30:55 -05:00
version = data['comprobante']['version']
2017-10-15 17:20:20 -05:00
data['emisor'] = _emisor(doc, version, values)
data['receptor'] = _receptor(doc, version, values)
2018-02-02 13:46:15 -06:00
data['conceptos'] = _conceptos(doc, version, values)
2017-10-15 17:20:20 -05:00
data['totales'] = _totales(doc, data['comprobante'], version)
2017-11-25 20:32:36 -06:00
data['donataria'] = _donataria(doc, version, values['fechadof'])
2017-11-26 00:15:14 -06:00
data['ine'] = _ine(doc, version)
2017-10-15 17:20:20 -05:00
options = {
'rfc_emisor': data['emisor']['rfc'],
'rfc_receptor': data['receptor']['rfc'],
'total': data['comprobante']['total'],
}
2021-06-29 19:07:55 -05:00
data['timbre'] = _timbre(doc, version, options, pdf_from)
2017-10-25 19:46:13 -05:00
del data['timbre']['version']
2017-10-24 00:03:07 -05:00
data['comprobante'].update(data['timbre'])
2017-10-15 02:30:55 -05:00
2018-02-03 01:23:05 -06:00
data['nomina'] = _nomina(doc, data, values, version)
2018-08-30 19:24:33 -05:00
data['pagos'] = values.get('pagos', False)
if data['pagos']:
data['pays'] = _cfdipays(doc, data, version)
2019-02-04 22:13:11 -06:00
data['pakings'] = values.get('pakings', [])
2021-02-02 22:09:12 -06:00
data['el.version'] = values['el.version']
2021-06-29 19:07:55 -05:00
2017-10-25 19:46:13 -05:00
return data
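
# Note: the dict returned above bundles everything the PDF templates consume.
# Its top-level keys, taken from the assignments in this function, are:
# cancelada, donativo, comprobante, emisor, receptor, conceptos, totales,
# donataria, ine, timbre (also merged into comprobante), nomina, pagos,
# pays (only when pagos is truthy), pakings and el.version.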
2017-10-15 18:57:25 -05:00
def to_zip(*files):
zip_buffer = BytesIO()
with zipfile.ZipFile(zip_buffer, 'a', zipfile.ZIP_DEFLATED, False) as zip_file:
for data, file_name in files:
zip_file.writestr(file_name, data)
return zip_buffer.getvalue()
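
# Usage sketch for to_zip (kept commented): each argument is a (data, file_name)
# pair; names and contents here are placeholders.
# ~ zip_bytes = to_zip((xml_str, 'factura.xml'), (pdf_bytes, 'factura.pdf'))
# ~ with open('/tmp/factura.zip', 'wb') as fh:
# ~     fh.write(zip_bytes)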
2017-10-16 00:02:51 -05:00
def make_fields(xml):
doc = ET.fromstring(xml)
data = CaseInsensitiveDict(doc.attrib.copy())
data.pop('certificado')
data.pop('sello')
version = data['version']
receptor = doc.find('{}Receptor'.format(PRE[version]))
receptor = CaseInsensitiveDict(receptor.attrib.copy())
data['receptor_nombre'] = receptor['nombre']
data['receptor_rfc'] = receptor['rfc']
data = {k.lower(): v for k, v in data.items()}
return data
def make_info_mail(data, fields):
2018-01-24 23:30:33 -06:00
try:
return data.format(**fields).replace('\n', '<br/>')
    except Exception:
log.error(data)
log.error(fields)
return data.replace('\n', '<br/>')
2017-10-16 00:02:51 -05:00
def send_mail(data):
msg = ''
server = SendMail(data['server'])
is_connect = server.is_connect
if is_connect:
msg = server.send(data['options'])
else:
msg = server.error
server.close()
return {'ok': is_connect, 'msg': msg}
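
# Usage sketch for send_mail (kept commented). The inner structure of 'server'
# and 'options' is defined by SendMail in helper.py; the variable names here
# are illustrative.
# ~ result = send_mail({'server': server_config, 'options': mail_options})
# ~ if not result['ok']:
# ~     log.error(result['msg'])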
2017-10-16 23:09:26 -05:00
2018-03-07 23:24:35 -06:00
def exists(path):
return os.path.exists(path)
2017-10-16 23:09:26 -05:00
def get_path_info(path):
path, filename = os.path.split(path)
name, extension = os.path.splitext(filename)
return (path, filename, name, extension)
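
# Example (kept commented): get_path_info splits a full path into its parts.
# ~ # get_path_info('/tmp/factura.xml') == ('/tmp', 'factura.xml', 'factura', '.xml')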
2018-02-05 23:59:49 -06:00
def get_path_temp(s=''):
return tempfile.mkstemp(s)[1]
2017-10-23 00:45:41 -05:00
2017-10-28 22:21:39 -05:00
def get_date(value, next_day=False):
d = parser.parse(value)
if next_day:
return d + datetime.timedelta(days=1)
return d
2018-11-20 23:47:53 -06:00
class UpFile(object):
def __init__(self):
self._init_values()
def _init_values(self):
return
2022-05-29 22:57:26 -05:00
def save_template(rfc, opt, file_obj):
result = {'status': 'error', 'ok': False}
name_template = f'{rfc}{opt}'
path_template = _join(PATH_MEDIA, 'templates', name_template)
if save_file(path_template, file_obj.file.read()):
result = {'status': 'server', 'name': file_obj.filename, 'ok': True}
return result
2017-11-03 14:09:34 -06:00
def upload_file(rfc, opt, file_obj):
2018-11-20 23:47:53 -06:00
rfc = rfc.lower()
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
2022-06-04 21:52:28 -05:00
versions = ('_3.2.ods',
2023-03-21 10:55:16 -06:00
'_3.3.ods', '_3.3_cd_1.1.ods', '_3.3_cp_1.0.ods', '_3.3_cn_1.2.ods', '_3.3_ccp_2.0.ods', '_3.3.json',
2023-01-06 21:42:46 -06:00
'_4.0.ods',
'_4.0_cn_1.2.ods',
'_4.0_cp_2.0.ods',
'_4.0_ccp_2.0.ods',
2023-02-01 22:26:10 -06:00
'_4.0_cd_1.1.ods',
'_4.0.json',
)
2022-05-29 22:57:26 -05:00
if opt in versions:
return save_template(rfc, opt, file_obj)
2018-11-20 23:47:53 -06:00
EXTENSIONS = {
'txt_plantilla_factura_32': EXT['ODS'],
'txt_plantilla_factura_33': EXT['ODS'],
'txt_plantilla_factura_html': EXT['HTML'],
'txt_plantilla_factura_css': EXT['CSS'],
2019-01-10 23:37:37 -06:00
'txt_plantilla_factura_json': EXT['JSON'],
2018-11-20 23:47:53 -06:00
}
2017-11-25 00:17:46 -06:00
2018-11-20 23:47:53 -06:00
if opt in EXTENSIONS:
if ext != EXTENSIONS[opt]:
msg = (
f"Extensión de archivo incorrecta, "
f"selecciona un archivo {EXTENSIONS[opt].upper()}"
)
2017-11-03 20:05:19 -06:00
return {'status': 'server', 'name': msg, 'ok': False}
2018-11-20 23:47:53 -06:00
NAMES = {
'txt_plantilla_factura_32': f"{rfc}_3.2.ods",
'txt_plantilla_factura_33': f"{rfc}_3.3.ods",
'txt_plantilla_factura_html': f"{rfc}_3.3.html",
2019-01-10 23:37:37 -06:00
'txt_plantilla_factura_css': f"{rfc}.css",
'txt_plantilla_factura_json': f"{rfc}_3.3.json",
2018-11-20 23:47:53 -06:00
}
name = NAMES[opt]
paths = {
'txt_plantilla_factura_32': _join(PATHS['USER'], name),
'txt_plantilla_factura_33': _join(PATHS['USER'], name),
'txt_plantilla_factura_html': _join(PATHS['USER'], name),
'txt_plantilla_factura_css': _join(PATHS['CSS'], name),
2019-01-10 23:37:37 -06:00
'txt_plantilla_factura_json': _join(PATHS['USER'], name),
2018-11-20 23:47:53 -06:00
}
if save_file(paths[opt], file_obj.file.read()):
return {'status': 'server', 'name': file_obj.filename, 'ok': True}
return {'status': 'error', 'ok': False}
2017-11-25 00:17:46 -06:00
2018-11-20 23:47:53 -06:00
if opt == 'txt_plantilla_ticket':
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'ods':
msg = 'Extensión de archivo incorrecta, selecciona un archivo ODS'
return {'status': 'server', 'name': msg, 'ok': False}
name = '{}_ticket.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'templates', name)
2017-11-25 00:17:46 -06:00
elif opt == 'txt_plantilla_donataria':
2017-11-11 11:40:55 -06:00
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'ods':
msg = 'Extensión de archivo incorrecta, selecciona un archivo ODS'
return {'status': 'server', 'name': msg, 'ok': False}
2017-11-25 20:26:15 -06:00
name = '{}_3.3_donativo.ods'.format(rfc.lower())
2017-11-11 11:40:55 -06:00
path = _join(PATH_MEDIA, 'templates', name)
2018-02-02 13:46:15 -06:00
elif opt == 'txt_plantilla_nomina1233':
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'ods':
msg = 'Extensión de archivo incorrecta, selecciona un archivo ODS'
return {'status': 'server', 'name': msg, 'ok': False}
name = '{}_1.2_3.3.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'templates', name)
elif opt == 'txt_plantilla_pagos10':
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'ods':
msg = 'Extensión de archivo incorrecta, selecciona un archivo ODS'
return {'status': 'server', 'name': msg, 'ok': False}
name = '{}_pagos_1.0.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'templates', name)
2017-11-29 23:57:31 -06:00
elif opt == 'bdfl':
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'sqlite':
msg = 'Extensión de archivo incorrecta, selecciona un archivo SQLite'
return {'status': 'server', 'name': msg, 'ok': False}
name = '{}.sqlite'.format(rfc.lower())
path = _join('/tmp', name)
2018-01-19 15:42:00 -06:00
elif opt == 'products':
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'ods':
msg = 'Extensión de archivo incorrecta, selecciona un archivo ODS'
return {'status': 'server', 'name': msg, 'ok': False}
name = '{}_products.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'tmp', name)
2018-01-19 22:16:19 -06:00
elif opt == 'invoiceods':
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'ods':
msg = 'Extensión de archivo incorrecta, selecciona un archivo ODS'
return {'status': 'server', 'name': msg, 'ok': False}
name = '{}_invoice.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'tmp', name)
2018-01-19 01:00:22 -06:00
elif opt == 'employees':
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'ods':
msg = 'Extensión de archivo incorrecta, selecciona un archivo ODS'
return {'status': 'server', 'name': msg, 'ok': False}
name = '{}_employees.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'tmp', name)
2018-01-26 01:52:59 -06:00
elif opt == 'nomina':
tmp = file_obj.filename.split('.')
ext = tmp[-1].lower()
if ext != 'ods':
msg = 'Extensión de archivo incorrecta, selecciona un archivo ODS'
return {'status': 'server', 'name': msg, 'ok': False}
name = '{}_nomina.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'tmp', name)
2017-11-03 20:05:19 -06:00
    else:
        # No known option matched: there is no target path, avoid a NameError below.
        return {'status': 'error', 'ok': False}

    if save_file(path, file_obj.file.read()):
        return {'status': 'server', 'name': file_obj.filename, 'ok': True}
2017-11-03 14:09:34 -06:00
2017-11-24 10:47:15 -06:00
return {'status': 'error', 'ok': False}
2017-11-03 14:09:34 -06:00
2017-12-28 01:08:49 -06:00
def _get_pem_from_pfx(cert):
tmp_p12 = save_temp(cert.p12)
args = "openssl pkcs12 -in '{}' -clcerts -nodes -nocerts " \
"-passin pass:'{}' | openssl rsa".format(tmp_p12, _get_md5(cert.rfc))
result = _call(args)
_kill(tmp_p12)
return result.encode()
def cancel_xml(auth, uuid, certificado):
from .pac import Finkok as PAC
if DEBUG:
auth = {}
else:
if not auth:
msg = 'Sin datos para cancelar'
data = {'ok': False, 'error': msg}
            # 'result' is not defined yet in this branch; return an empty dict.
            return data, {}
msg = 'Factura cancelada correctamente'
data = {'ok': True, 'msg': msg, 'row': {'estatus': 'Cancelada'}}
pac = PAC(auth)
result = pac.cancel_xml(certificado.rfc, str(uuid).upper(),
2017-12-28 01:08:49 -06:00
certificado.cer_pem.encode(), _get_pem_from_pfx(certificado))
if result:
codes = {None: '',
'Could not get UUID Text': 'UUID no encontrado',
'Invalid Passphrase': 'Contraseña inválida',
}
if not result['CodEstatus'] is None:
data['ok'] = False
data['msg'] = codes.get(result['CodEstatus'], result['CodEstatus'])
else:
data['ok'] = False
data['msg'] = pac.error
return data, result
def cancel_signature(uuid, pk12, rfc, auth):
2017-10-29 16:53:10 -06:00
from .pac import Finkok as PAC
token = _get_md5(rfc)
if USAR_TOKEN:
token = auth['PASS']
if AUTH['DEBUG']:
token = AUTH['PASS']
2017-10-29 16:53:10 -06:00
template = read_file(TEMPLATE_CANCEL, 'r')
data = {
'rfc': rfc,
'fecha': datetime.datetime.now().isoformat()[:19],
'uuid': str(uuid).upper(),
}
template = template.format(**data)
data = {
'xmlsec': PATH_XMLSEC,
2017-10-30 13:57:02 -06:00
'pk12': save_temp(pk12),
'pass': token,
2017-10-30 13:57:02 -06:00
'template': save_temp(template, 'w'),
2017-10-29 16:53:10 -06:00
}
args = '"{xmlsec}" --sign --pkcs12 "{pk12}" --pwd {pass} ' \
'"{template}"'.format(**data)
xml_sign = _call(args)
if DEBUG:
auth = {}
else:
if not auth:
msg = 'Sin datos para cancelar'
            data = {'ok': False, 'error': msg}
            return data, {}
msg = 'Factura cancelada correctamente'
data = {'ok': True, 'msg': msg, 'row': {'estatus': 'Cancelada'}}
pac = PAC(auth)
result = pac.cancel_signature(xml_sign)
if result:
codes = {None: '',
'Could not get UUID Text': 'UUID no encontrado'}
if not result['CodEstatus'] is None:
data['ok'] = False
data['msg'] = codes.get(result['CodEstatus'], result['CodEstatus'])
else:
data['ok'] = False
data['msg'] = pac.error
return data, result
2017-11-06 22:21:14 -06:00
def run_in_thread(fn):
def run(*k, **kw):
t = threading.Thread(target=fn, args=k, kwargs=kw)
t.start()
return t
return run
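
# Usage sketch for the run_in_thread decorator (kept commented); the function
# name is illustrative. The call returns the already started Thread object.
# ~ @run_in_thread
# ~ def _slow_task(rfc):
# ~     log.info('working on {}'.format(rfc))
# ~
# ~ t = _slow_task('AAA010101AAA')  # returns a threading.Thread, already running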
def get_bool(value):
if not value:
return False
if value == '1':
return True
return False
2018-09-28 02:17:17 -05:00
def get_float(value, four=False):
if four:
return round(float(value), DECIMALES_TAX)
2017-11-21 00:48:51 -06:00
return round(float(value), DECIMALES)
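
# Example (kept commented): the precision comes from settings; assuming
# DECIMALES = 2, the first call rounds to two decimals, while four=True uses
# DECIMALES_TAX decimals instead.
# ~ # get_float('10.129')             -> 10.13
# ~ # get_float('10.129', four=True)  -> rounded to DECIMALES_TAX decimals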
2017-12-03 00:09:44 -06:00
def crear_rol(user, contra=''):
if not contra:
contra = user
args = 'psql -U postgres -c "CREATE ROLE {} WITH LOGIN ENCRYPTED ' \
'PASSWORD \'{}\';"'.format(user, contra)
try:
result = _call(args)
if result == 'CREATE ROLE\n':
return True
except Exception as e:
log.info(e)
return False
def crear_db(nombre):
args = 'psql -U postgres -c "CREATE DATABASE {0} WITH ' \
'OWNER {0};"'.format(nombre)
try:
result = _call(args)
        print(result)
if result == 'CREATE DATABASE\n':
return True
except Exception as e:
log.info(e)
return False
2018-01-25 22:07:11 -06:00
def _backup_db(user):
dt = datetime.datetime.now().strftime('%y%m%d_%H%M')
path_bk = _join(PATH_MEDIA, 'tmp', '{}_{}.bk'.format(user, dt))
args = 'pg_dump -U postgres -Fc {} > "{}"'.format(user, path_bk)
_call(args)
return
def delete_db(user, bk=True):
if bk:
_backup_db(user)
args = 'psql -U postgres -c "DROP DATABASE {0};"'.format(user)
_call(args)
args = 'psql -U postgres -c "DROP ROLE {0};"'.format(user)
_call(args)
return
2017-12-10 02:04:40 -06:00
def _to_seafile(path_db, data):
if DEBUG:
return
2017-12-10 12:12:06 -06:00
_, filename = os.path.split(path_db)
if SEAFILE_SERVER:
msg = '\tSincronizando backup general...'
log.info(msg)
seafile = SeaFileAPI(
SEAFILE_SERVER['URL'],
SEAFILE_SERVER['USER'],
SEAFILE_SERVER['PASS'])
if seafile.is_connect:
msg = '\tSincronizando: {} '.format(filename)
log.info(msg)
seafile.update_file(
2017-12-10 14:35:03 -06:00
path_db, SEAFILE_SERVER['REPO'], '/', SEAFILE_SERVER['PASS'])
2017-12-10 12:12:06 -06:00
msg = '\tRespaldo general de {} sincronizado'.format(filename)
log.info(msg)
2017-12-10 14:35:03 -06:00
msg = '\tSin datos para sincronización particular de {}'.format(filename)
2017-12-10 12:12:06 -06:00
    if len(data) < 3:
2017-12-10 14:35:03 -06:00
log.info(msg)
2017-12-10 12:12:06 -06:00
return
2017-12-10 14:35:03 -06:00
if not data[0] or not data[1] or not data[2]:
log.info(msg)
2017-12-10 12:12:06 -06:00
return
2017-12-10 14:35:03 -06:00
msg = '\tSincronizando backup particular...'
log.info(msg)
seafile = SeaFileAPI(SEAFILE_SERVER['URL'], data[0], data[1])
if seafile.is_connect:
msg = '\t\tSincronizando: {} '.format(filename)
log.info(msg)
seafile.update_file(path_db, data[2], 'Base de datos/', data[1])
        msg = '\t\tRespaldo particular de {} sincronizado'.format(filename)
log.info(msg)
2017-12-10 12:12:06 -06:00
2017-12-10 02:04:40 -06:00
return
2017-12-10 12:12:06 -06:00
@run_in_thread
2017-12-10 02:04:40 -06:00
def _backup_and_sync(rfc, data):
msg = 'Generando backup de: {}'.format(rfc)
log.info(msg)
2017-12-10 14:35:03 -06:00
sql = 'select correo_timbrado, token_timbrado, token_soporte from emisor;'
2017-12-10 02:04:40 -06:00
path_bk = _join(PATH_MEDIA, 'tmp', '{}.bk'.format(rfc.lower()))
if data['type'] == 'postgres':
args = 'pg_dump -U postgres -Fc {} > "{}"'.format(
data['name'], path_bk)
sql = 'psql -U postgres -d {} -Atc "{}"'.format(data['name'], sql)
elif data['type'] == 'sqlite':
args = 'gzip -c "{}" > "{}"'.format(data['name'], path_bk)
sql = 'sqlite3 "{}" "{}"'.format(data['name'], sql)
try:
result = _call(args)
2017-12-10 14:35:03 -06:00
msg = '\tBackup generado de {}'.format(rfc)
2017-12-10 02:04:40 -06:00
log.info(msg)
result = _call(sql).strip().split('|')
_to_seafile(path_bk, result)
except Exception as e:
log.info(e)
return
2017-12-31 17:32:22 -06:00
@run_in_thread
def _backup_companies():
2018-01-01 21:23:32 -06:00
if DEBUG:
return
2017-12-31 17:32:22 -06:00
_, filename = os.path.split(COMPANIES)
if SEAFILE_SERVER:
msg = '\tSincronizando backup RFCs...'
log.info(msg)
seafile = SeaFileAPI(
SEAFILE_SERVER['URL'],
SEAFILE_SERVER['USER'],
SEAFILE_SERVER['PASS'])
if seafile.is_connect:
msg = '\tSincronizando: {} '.format(filename)
log.info(msg)
seafile.update_file(
COMPANIES, SEAFILE_SERVER['REPO'], '/', SEAFILE_SERVER['PASS'])
msg = '\tRespaldo general de {} sincronizado'.format(filename)
log.info(msg)
return
2017-12-10 00:59:39 -06:00
def backup_dbs():
con = sqlite3.connect(COMPANIES)
cursor = con.cursor()
sql = "SELECT * FROM names"
cursor.execute(sql)
    rows = cursor.fetchall()
    cursor.close()
    con.close()
    if not rows:
        return
for rfc, data in rows:
args = loads(data)
2017-12-10 12:12:06 -06:00
_backup_and_sync(rfc, args)
2017-12-10 02:04:40 -06:00
2017-12-31 17:32:22 -06:00
_backup_companies()
2017-12-10 00:59:39 -06:00
return
2017-12-13 01:13:48 -06:00
def _validar_directorios(path_bk, target):
path = Path(_join(path_bk, target))
path.mkdir(parents=True, exist_ok=True)
return str(path)
def local_copy(files):
if not MV:
return
2017-12-17 22:55:32 -06:00
path_bk = _join(str(Path.home()), DIR_FACTURAS)
2017-12-13 01:13:48 -06:00
if not os.path.isdir(path_bk):
msg = 'No existe la carpeta: facturas'
log.error(msg)
return
2020-12-29 21:53:51 -06:00
# ~ args = 'df -P {} | tail -1 | cut -d" " -f 1'.format(path_bk)
# ~ try:
# ~ result = _call(args)
2018-02-16 13:52:30 -06:00
# ~ log.info(result)
2020-12-29 21:53:51 -06:00
# ~ except:
# ~ pass
2017-12-17 22:38:19 -06:00
# ~ if result != 'empresalibre\n':
# ~ log.info(result)
# ~ msg = 'Asegurate de que exista la carpeta para sincronizar'
# ~ log.error(msg)
# ~ return
2017-12-17 22:55:32 -06:00
# ~ except subprocess.CalledProcessError:
# ~ msg = 'No se pudo obtener la ruta para sincronizar'
# ~ log.error(msg)
# ~ return
2017-12-13 01:13:48 -06:00
try:
for obj, name, target in files:
path = _validar_directorios(path_bk, target)
path_file = _join(path, name)
m = 'wb'
if name.endswith('xml'):
m = 'w'
save_file(path_file, obj, m)
except Exception as e:
log.error(e)
return
2018-09-05 23:47:55 -05:00
def sync_files(files, auth={}):
if not MV:
return
path_bk = _join(str(Path.home()), DIR_FACTURAS)
if not os.path.isdir(path_bk):
msg = 'No existe la carpeta: facturas'
log.error(msg)
return
for obj, name, target in files:
path = _validar_directorios(path_bk, target)
path_file = _join(path, name)
m = 'wb'
if name.endswith('xml'):
m = 'w'
save_file(path_file, obj, m)
return
2020-12-29 22:05:02 -06:00
def sync_cfdi(files):
2017-12-13 01:13:48 -06:00
local_copy(files)
2017-12-10 19:45:19 -06:00
if DEBUG:
return
2020-12-29 21:53:51 -06:00
# ~ if not auth['REPO'] or not SEAFILE_SERVER:
# ~ return
2017-12-13 01:13:48 -06:00
2020-12-29 21:53:51 -06:00
# ~ seafile = SeaFileAPI(SEAFILE_SERVER['URL'], auth['USER'], auth['PASS'])
# ~ if seafile.is_connect:
# ~ for f in files:
# ~ seafile.update_file(
# ~ f, auth['REPO'], 'Facturas/{}/'.format(f[2]), auth['PASS'])
2017-12-10 19:45:19 -06:00
return
2018-02-07 00:48:06 -06:00
class ImportCFDI(object):
def __init__(self, xml):
self._doc = xml
self._pre = ''
def _relacionados(self):
data = {}
node = self._doc.find('{}CfdiRelacionados'.format(self._pre))
if not node is None:
data = CaseInsensitiveDict(node.attrib.copy())
return data
2018-02-07 00:48:06 -06:00
def _emisor(self):
emisor = self._doc.find('{}Emisor'.format(self._pre))
data = CaseInsensitiveDict(emisor.attrib.copy())
node = emisor.find('{}RegimenFiscal'.format(self._pre))
if not node is None:
data['regimen_fiscal'] = node.attrib['Regimen']
return data
def _receptor(self):
node = self._doc.find('{}Receptor'.format(self._pre))
data = CaseInsensitiveDict(node.attrib.copy())
node = node.find('{}Domicilio'.format(self._pre))
if not node is None:
data.update(node.attrib.copy())
return data
def _conceptos(self):
data = []
conceptos = self._doc.find('{}Conceptos'.format(self._pre))
2020-12-31 20:17:00 -06:00
for c in list(conceptos):
2018-02-07 00:48:06 -06:00
values = CaseInsensitiveDict(c.attrib.copy())
data.append(values)
return data
def _impuestos(self):
data = {}
node = self._doc.find('{}Impuestos'.format(self._pre))
if not node is None:
data = CaseInsensitiveDict(node.attrib.copy())
return data
def _timbre(self):
node = self._doc.find('{}Complemento/{}TimbreFiscalDigital'.format(
self._pre, PRE['TIMBRE']))
data = CaseInsensitiveDict(node.attrib.copy())
data.pop('SelloCFD', None)
data.pop('SelloSAT', None)
data.pop('Version', None)
return data
def get_data(self):
invoice = CaseInsensitiveDict(self._doc.attrib.copy())
invoice.pop('certificado', '')
invoice.pop('sello', '')
self._pre = PRE[invoice['version']]
relacionados = self._relacionados()
2018-02-07 00:48:06 -06:00
emisor = self._emisor()
receptor = self._receptor()
conceptos = self._conceptos()
impuestos = self._impuestos()
timbre = self._timbre()
invoice.update(relacionados)
2018-02-07 00:48:06 -06:00
invoice.update(emisor)
invoice.update(receptor)
invoice.update(impuestos)
invoice.update(timbre)
data = {
'invoice': invoice,
'emisor': emisor,
'receptor': receptor,
'conceptos': conceptos,
}
return data
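
# Usage sketch for ImportCFDI (kept commented): it expects an already parsed
# CFDI element, for example the result of parse_xml, and returns the flattened
# data used to register a received invoice.
# ~ cfdi = ImportCFDI(parse_xml(xml_str))
# ~ data = cfdi.get_data()
# ~ # data['invoice'], data['emisor'], data['receptor'], data['conceptos']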
2018-01-01 02:14:30 -06:00
def print_ticket(data, info):
p = PrintTicket(info)
return p.printer(data)
2018-01-19 15:42:00 -06:00
def import_products(rfc):
name = '{}_products.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'tmp', name)
if not is_file(path):
2018-01-19 22:16:19 -06:00
return (), 'No se encontró la plantilla'
2018-01-19 15:42:00 -06:00
if APP_LIBO:
app = LIBO()
if app.is_running:
return app.products(path)
2018-01-19 22:16:19 -06:00
    return (), 'No se encontró LibreOffice'
def import_invoice(rfc):
name = '{}_invoice.ods'.format(rfc.lower())
path = _join(PATH_MEDIA, 'tmp', name)
if not is_file(path):
return (), 'No se encontró la plantilla'
if APP_LIBO:
app = LIBO()
if app.is_running:
return app.invoice(path)
    return (), 'No se encontró LibreOffice'
2018-01-24 00:51:09 -06:00
def calc_to_date(value):
return datetime.date.fromordinal(int(value) + 693594)
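
# calc_to_date converts a spreadsheet serial date (days since 1899-12-30, the
# LibreOffice/Excel epoch) into a datetime.date; 693594 is the ordinal of that
# epoch. Worked example (kept commented):
# ~ # calc_to_date(43831) == datetime.date(2020, 1, 1)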
2018-01-28 03:12:35 -06:00
def get_days(start, end):
return (end - start).days + 1
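
# Note: the count is inclusive of both endpoints, e.g. (kept commented):
# ~ # get_days(datetime.date(2018, 1, 1), datetime.date(2018, 1, 31)) == 31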
def log_file(name, msg='', kill=False):
path = _join(PATH_MEDIA, 'tmp', '{}.log'.format(name))
if kill:
_kill(path)
return
with open(path, 'a') as fh:
2018-01-29 20:42:13 -06:00
line = '{} : {}\n'.format(str(now()), msg)
2018-01-28 03:12:35 -06:00
fh.write(line)
return
2018-01-29 21:57:44 -06:00
def get_log(name):
data = ''
name = '{}.log'.format(name)
path = _join(PATH_MEDIA, 'tmp', name)
if is_file(path):
data = open(path).read()
return data, name
2018-01-30 00:41:43 -06:00
2018-07-25 16:27:53 -05:00
def get_timbres(auth):
from .pac import Finkok as PAC
2018-02-13 23:12:21 -06:00
if DEBUG:
2018-07-25 16:27:53 -05:00
return '-d'
2018-02-13 23:12:21 -06:00
2018-07-25 16:27:53 -05:00
pac = PAC(auth)
timbres = pac.client_get_timbres(auth['RFC'])
if pac.error:
return '-e'
return timbres
2018-05-23 23:42:57 -05:00
def truncate(value):
return trunc(value * 100) / 100
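
# truncate drops (never rounds) everything past two decimal places, e.g.:
# ~ # truncate(10.987) == 10.98
# ~ # truncate(10.981) == 10.98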
2018-06-03 00:00:04 -05:00
def validate_path_bk():
path_bk = _join(str(Path.home()), DIR_FACTURAS)
if not os.path.isdir(path_bk):
msg = 'No existe la carpeta'
return {'ok': False, 'msg': msg}
return {'ok': True, 'msg': path_bk}
def respaldar_db(values, path_bk):
user = values[0].lower()
db = loads(values[1])['name']
path = _join(path_bk, '{}.bk'.format(user))
args = 'pg_dump -U postgres -Fc {} > "{}"'.format(db, path)
_call(args)
return
2018-10-05 23:13:03 -05:00
def validate_rfc(value):
msg = ''
if len(value) < 12:
msg = 'Longitud inválida del RFC'
return msg
l = 4
if len(value)==12:
l = 3
s = value[0:l]
r = re.match('[A-ZÑ&]{%s}' % l, s)
msg = 'Caracteres inválidos al {} del RFC'
if not r:
return msg.format('inicio')
s = value[-3:]
r = re.match('[A-Z0-9]{3}', s)
if not r:
return msg.format('final')
s = value[l:l+6]
r = re.match('[0-9]{6}', s)
msg = 'Fecha inválida en el RFC'
if not r:
return msg
try:
datetime.datetime.strptime(s, '%y%m%d')
return ''
    except ValueError:
return msg
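
# validate_rfc returns an empty string when the RFC is well formed and a human
# readable message otherwise. Examples (kept commented; the first value is the
# SAT generic RFC, not a real taxpayer):
# ~ # validate_rfc('XAXX010101000') == ''
# ~ # validate_rfc('XX') == 'Longitud inválida del RFC'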
2018-11-19 01:03:48 -06:00
def parse_xml2(xml_str):
return etree.fromstring(xml_str.encode('utf-8'))
2019-02-03 22:43:27 -06:00