- Remplacement de DocuSeal par solution souveraine Odentas Sign - Système d'authentification OTP pour signataires (bcryptjs + JWT) - 8 routes API: send-otp, verify-otp, sign, pdf-url, positions, status, webhook, signers - Interface moderne avec canvas de signature et animations (framer-motion, confetti) - Système de templates pour auto-détection des positions de signature (CDDU, RG, avenants) - PDF viewer avec @react-pdf-viewer (compatible Next.js) - Stockage S3: source/, signatures/, evidence/, signed/, certs/ - Tables Supabase: sign_requests, signers, sign_positions, sign_events, sign_assets - Evidence bundle automatique (JSON metadata + timestamps) - Templates emails: OTP et completion - Scripts Lambda prêts: pades-sign (KMS seal) et tsaStamp (RFC3161) - Mode test détecté automatiquement (emails whitelist) - Tests complets avec PDF CDDU réel (2 signataires)
531 lines
18 KiB
JavaScript
531 lines
18 KiB
JavaScript
import * as asn1js from 'asn1js';
|
|
import {
|
|
Certificate,
|
|
SignedData,
|
|
ContentInfo,
|
|
IssuerAndSerialNumber,
|
|
Attribute,
|
|
AlgorithmIdentifier,
|
|
EncapsulatedContentInfo,
|
|
SignerInfo,
|
|
SignedAndUnsignedAttributes
|
|
} from 'pkijs';
|
|
import crypto from 'node:crypto';
|
|
import { Buffer } from 'node:buffer';
|
|
|
|
// pkijs relies on a global WebCrypto implementation; on Node.js, expose the
// built-in webcrypto when nothing has installed one yet.
if (typeof globalThis.crypto === 'undefined') {
  globalThis.crypto = crypto.webcrypto;
}

// CMS / PKCS#9 object identifiers used in the signed attributes.
const OID_ID_DATA = '1.2.840.113549.1.7.1';             // id-data content type
const OID_ATTR_CONTENT_TYPE = '1.2.840.113549.1.9.3';   // contentType attribute
const OID_ATTR_SIGNING_TIME = '1.2.840.113549.1.9.5';   // signingTime attribute
const OID_ATTR_MESSAGE_DIGEST = '1.2.840.113549.1.9.4'; // messageDigest attribute
|
|
|
|
/**
 * Step 1: Prepare the PDF with the real, computed ByteRange values.
 *
 * Two-pass strategy: build the incremental update with a fixed-width
 * ByteRange placeholder first, measure the resulting byte offsets, then
 * rebuild with the real values padded to the exact same width so that no
 * byte moves between the two constructions.
 *
 * @param {Buffer|Uint8Array} pdfBytes - Original (unsigned) PDF bytes.
 * @returns {Promise<{pdfWithRevision: Buffer, byteRange: number[],
 *   contentsPlaceholder: string, signingTime: string}>}
 * @throws {Error} if the /Contents placeholder is missing, if the real
 *   ByteRange string overflows its placeholder, or if the offsets differ
 *   between the two passes.
 */
export async function preparePdfWithPlaceholder(pdfBytes) {
  const originalPdf = Buffer.from(pdfBytes);
  const pdfStructure = parsePdfStructure(originalPdf);

  // Generate the timestamp ONCE so the PDF /M entry and the CMS signingTime
  // attribute stay consistent. Digits come from toISOString (hence UTC),
  // format YYYYMMDDHHMMSS.
  const signingTime = new Date().toISOString().replace(/[-:T.Z]/g, '').slice(0, 14);

  // Fixed-size hex placeholder for /Contents (32 KiB of signature room).
  const contentsPlaceholder = '<' + '0'.repeat(65536) + '>'; // 65538 chars total including < >

  // PASS 1: build with a fixed-width ByteRange placeholder.
  // The placeholder must be exactly as wide as the real ByteRange written in
  // pass 2, otherwise every subsequent byte offset would shift.
  // Format: [0000000000 0000000000 0000000000 0000000000] = 47 chars with brackets.
  const byteRangePlaceholder = '[0000000000 0000000000 0000000000 0000000000]';

  console.log('[preparePdfWithPlaceholder] PASSE 1: Construction avec placeholder ByteRange...');
  const incrementalUpdate1 = buildIncrementalUpdate(
    pdfStructure,
    byteRangePlaceholder, // placeholder, same width as the real value
    contentsPlaceholder,
    signingTime
  );

  const pdf1 = assemblePdfWithRevision(originalPdf, pdfStructure, incrementalUpdate1);

  // Locate the /Contents placeholder. latin1 keeps a 1:1 char/byte mapping,
  // so string indices are byte offsets.
  const pdf1Str = pdf1.toString('latin1');
  const contentsMatch = pdf1Str.match(/\/Contents <(0+)>/);
  if (!contentsMatch) throw new Error('Placeholder /Contents non trouvé');

  const contentsStart = contentsMatch.index + '/Contents <'.length;
  const contentsEnd = contentsStart + contentsMatch[1].length;
  // [start1 len1 start2 len2]: everything except the hex digits of the
  // placeholder (the '<' and '>' delimiters stay inside the digested ranges).
  const byteRange = [0, contentsStart, contentsEnd, pdf1.length - contentsEnd];

  console.log('[preparePdfWithPlaceholder] ByteRange calculé:', byteRange);

  // PASS 2: rebuild with the REAL ByteRange (same width thanks to padding).
  console.log('[preparePdfWithPlaceholder] PASSE 2: Reconstruction avec vraies valeurs...');

  // Pad the ByteRange string so it is exactly as long as the placeholder.
  const byteRangeStr = `[${byteRange[0]} ${byteRange[1]} ${byteRange[2]} ${byteRange[3]}]`;
  if (byteRangeStr.length > byteRangePlaceholder.length) {
    throw new Error(`ByteRange trop grand: ${byteRangeStr.length} > ${byteRangePlaceholder.length}`);
  }

  // Right-pad with spaces (insignificant whitespace inside a PDF array).
  const byteRangePadded = byteRangeStr + ' '.repeat(byteRangePlaceholder.length - byteRangeStr.length);

  const incrementalUpdate2 = buildIncrementalUpdate(
    pdfStructure,
    byteRangePadded, // padded string, same width as the placeholder
    contentsPlaceholder,
    signingTime
  );

  const pdfWithRevision = assemblePdfWithRevision(originalPdf, pdfStructure, incrementalUpdate2);

  // Verify the offsets did NOT move between the two passes.
  const pdf2Str = pdfWithRevision.toString('latin1');
  const contents2Match = pdf2Str.match(/\/Contents <(0+)>/);
  const contents2Start = contents2Match.index + '/Contents <'.length;
  const contents2End = contents2Start + contents2Match[1].length;

  if (contents2Start !== contentsStart || contents2End !== contentsEnd) {
    console.error('[preparePdfWithPlaceholder] Position mismatch!');
    console.error(' PASSE 1: contentsStart=', contentsStart, 'contentsEnd=', contentsEnd);
    console.error(' PASSE 2: contentsStart=', contents2Start, 'contentsEnd=', contents2End);
    throw new Error('Les positions ByteRange ont changé entre les deux constructions !');
  }

  console.log('[preparePdfWithPlaceholder] ✅ Positions vérifiées, PDF prêt');

  return {
    pdfWithRevision,
    byteRange,
    contentsPlaceholder,
    signingTime
  };
}
|
|
|
|
/**
 * Parse key references out of an existing PDF so an incremental signature
 * revision can be appended.
 *
 * NOTE(review): this is a lightweight regex scan, not a real PDF parser. It
 * assumes a classic (uncompressed) cross-reference layout and will miss
 * objects stored inside object streams — presumably acceptable for the PDFs
 * produced upstream; confirm if other document sources are introduced.
 *
 * @param {Buffer} pdfBytes - Raw PDF bytes.
 * @returns {{prevStartxref: number, nextObjNum: number, rootRef: number,
 *   pagesRef: ?number, firstPageRef: ?number, acroFormRef: ?number,
 *   infoRef: ?number}}
 * @throws {Error} if no startxref, no indirect object, or no /Root is found.
 */
function parsePdfStructure(pdfBytes) {
  // latin1 keeps a 1:1 char/byte mapping, so regex indices are byte offsets.
  const pdfStr = pdfBytes.toString('latin1');

  // Last startxref → offset of the previous xref table (used for /Prev).
  const startxrefMatches = [...pdfStr.matchAll(/startxref\s+(\d+)/g)];
  if (startxrefMatches.length === 0) throw new Error('startxref non trouvé');
  const prevStartxref = parseInt(startxrefMatches[startxrefMatches.length - 1][1], 10);

  // Highest object number in use → our new objects start right after it.
  const objMatches = [...pdfStr.matchAll(/(\d+) \d+ obj/g)];
  if (objMatches.length === 0) throw new Error('Aucun objet PDF trouvé');
  // reduce() instead of Math.max(...spread): spreading tens of thousands of
  // matches can exceed the engine's argument limit on large PDFs. The guard
  // above also avoids Math.max() of nothing yielding -Infinity.
  const maxObjNum = objMatches.reduce((max, m) => Math.max(max, parseInt(m[1], 10)), 0);
  const nextObjNum = maxObjNum + 1;

  // /Root (document catalog) — mandatory.
  const rootMatch = pdfStr.match(/\/Root\s+(\d+)\s+0\s+R/);
  if (!rootMatch) throw new Error('/Root non trouvé');
  const rootRef = parseInt(rootMatch[1], 10);

  // /Pages (page tree root), if present.
  const pagesMatch = pdfStr.match(/\/Pages\s+(\d+)\s+0\s+R/);
  const pagesRef = pagesMatch ? parseInt(pagesMatch[1], 10) : null;

  // First object whose /Type is /Page (used as /P for the signature widget).
  // FIX: the (?![A-Za-z]) guard stops "/Type /Pages" (the page TREE node)
  // from matching — the previous pattern could return the tree node instead
  // of an actual page.
  const firstPageMatch = pdfStr.match(/(\d+)\s+0\s+obj\s*<<[^>]*\/Type\s*\/Page(?![A-Za-z])[^>]*>>/);
  const firstPageRef = firstPageMatch ? parseInt(firstPageMatch[1], 10) : null;

  // Existing /AcroForm, if any.
  const acroFormMatch = pdfStr.match(/\/AcroForm\s+(\d+)\s+0\s+R/);
  const acroFormRef = acroFormMatch ? parseInt(acroFormMatch[1], 10) : null;

  // /Info dictionary, if any (re-referenced in the new trailer).
  const infoMatch = pdfStr.match(/\/Info\s+(\d+)\s+0\s+R/);
  const infoRef = infoMatch ? parseInt(infoMatch[1], 10) : null;

  return {
    prevStartxref,
    nextObjNum,
    rootRef,
    pagesRef,
    firstPageRef,
    acroFormRef,
    infoRef
  };
}
|
|
|
|
/**
 * Build the new indirect PDF objects for the signature revision.
 *
 * `byteRange` is always a string of fixed width: either the pass-1
 * placeholder or the real values padded with spaces (pass 2). Both have the
 * same length, which keeps the generated objects byte-stable between passes.
 *
 * @param {object} pdfStructure - Output of parsePdfStructure().
 * @param {string} byteRange - ByteRange string (placeholder or padded values).
 * @param {string} contentsPlaceholder - '<000...0>' hex placeholder.
 * @param {string} signingTime - YYYYMMDDHHMMSS digits for the /M entry.
 * @returns {{newObjects: string[], catalogObjNum: number,
 *   sigObjNum: number, nextObjNum: number}}
 */
function buildIncrementalUpdate(pdfStructure, byteRange, contentsPlaceholder, signingTime) {
  const { nextObjNum, pagesRef, firstPageRef } = pdfStructure;

  // Serialize one indirect dictionary: "<num> 0 obj\n<<\n<body>\n>>\nendobj\n".
  const serializeObj = (num, bodyLines) =>
    `${num} 0 obj\n<<\n${bodyLines.join('\n')}\n>>\nendobj\n`;

  // Allocate consecutive object numbers starting at nextObjNum.
  let counter = nextObjNum;
  const transformParamsObjNum = counter++;
  const sigObjNum = counter++;
  const widgetObjNum = counter++;
  const acroFormObjNum = counter++;
  const permsObjNum = counter++;
  const catalogObjNum = counter++;

  const newObjects = [
    // 1. TransformParams (DocMDP, /P 1: no changes allowed after signing)
    serializeObj(transformParamsObjNum, [
      '/Type /TransformParams',
      '/V /1.2',
      '/P 1',
    ]),

    // 2. Signature dictionary — ByteRange is either the placeholder (pass 1)
    // or the padded real values (pass 2); both strings have the same width.
    serializeObj(sigObjNum, [
      '/Type /Sig',
      '/Filter /Adobe.PPKLite',
      '/SubFilter /ETSI.CAdES.detached',
      `/ByteRange ${byteRange}`,
      `/Contents ${contentsPlaceholder}`,
      `/M (D:${signingTime})`,
      '/Reference [<<',
      '/Type /SigRef',
      '/TransformMethod /DocMDP',
      `/TransformParams ${transformParamsObjNum} 0 R`,
      '>>]',
    ]),

    // 3. Widget annotation for the signature field (invisible, /Rect [0 0 0 0];
    // /F 132 sets annotation flags — presumably Print+Locked, verify vs spec)
    serializeObj(widgetObjNum, [
      '/Type /Annot',
      '/Subtype /Widget',
      '/FT /Sig',
      '/T (Signature1)',
      `/V ${sigObjNum} 0 R`,
      `/P ${firstPageRef} 0 R`,
      '/Rect [0 0 0 0]',
      '/F 132',
    ]),

    // 4. AcroForm referencing the signature field
    serializeObj(acroFormObjNum, [
      `/Fields [${widgetObjNum} 0 R]`,
      '/SigFlags 3',
    ]),

    // 5. Perms dictionary pointing DocMDP at the signature
    serializeObj(permsObjNum, [
      `/DocMDP ${sigObjNum} 0 R`,
    ]),

    // 6. Updated catalog — IMPORTANT: keep the original /Pages reference!
    serializeObj(catalogObjNum, [
      '/Type /Catalog',
      `/Pages ${pagesRef} 0 R`,
      `/AcroForm ${acroFormObjNum} 0 R`,
      `/Perms ${permsObjNum} 0 R`,
    ]),
  ];

  return {
    newObjects,
    catalogObjNum,
    sigObjNum,
    nextObjNum: counter,
  };
}
|
|
|
|
/**
 * Assemble the final PDF: original bytes + appended incremental revision
 * (new objects, xref subsection, trailer).
 *
 * @param {Buffer} originalPdf - Original PDF bytes, untouched.
 * @param {object} pdfStructure - Output of parsePdfStructure().
 * @param {object} incrementalUpdate - Output of buildIncrementalUpdate().
 * @returns {Buffer} The PDF with the signature revision appended.
 */
function assemblePdfWithRevision(originalPdf, pdfStructure, incrementalUpdate) {
  // The revision starts right after the original bytes plus one newline.
  const chunks = [originalPdf, Buffer.from('\n', 'latin1')];
  let offset = originalPdf.length + 1;

  // Append each new object, recording its byte offset for the xref table.
  const xrefEntries = [];
  for (const [idx, objStr] of incrementalUpdate.newObjects.entries()) {
    const objBuf = Buffer.from(objStr, 'latin1');
    xrefEntries.push({
      objNum: pdfStructure.nextObjNum + idx,
      offset,
      gen: 0,
    });
    chunks.push(objBuf);
    offset += objBuf.length;
  }

  // Cross-reference table: the mandatory free-list head (object 0), then one
  // contiguous subsection covering the new objects.
  const xrefOffset = offset;
  const xrefLines = [
    'xref',
    '0 1',
    '0000000000 65535 f ',
    `${pdfStructure.nextObjNum} ${xrefEntries.length}`,
  ];
  for (const entry of xrefEntries) {
    xrefLines.push(`${String(entry.offset).padStart(10, '0')} ${String(entry.gen).padStart(5, '0')} n `);
  }

  // Trailer: /Prev chains back to the previous xref; /Root points at the
  // updated catalog; /Info is carried over when the original had one.
  const trailerLines = [
    'trailer',
    '<<',
    `/Size ${pdfStructure.nextObjNum + xrefEntries.length}`,
    `/Prev ${pdfStructure.prevStartxref}`,
    `/Root ${incrementalUpdate.catalogObjNum} 0 R`,
  ];
  if (pdfStructure.infoRef) {
    trailerLines.push(`/Info ${pdfStructure.infoRef} 0 R`);
  }
  trailerLines.push('>>', 'startxref', String(xrefOffset), '%%EOF');

  chunks.push(Buffer.from(xrefLines.join('\n') + '\n' + trailerLines.join('\n') + '\n', 'latin1'));

  return Buffer.concat(chunks);
}
|
|
|
|
/**
 * Step 2: compute the SHA-256 digest of the CMS SignedAttributes.
 *
 * The ByteRange is already embedded in the PDF and is received as a
 * parameter; the PDF digest covers the two byte ranges around /Contents.
 *
 * @param {Buffer} pdfWithRevision - PDF from preparePdfWithPlaceholder().
 * @param {number[]} byteRange - [start1, len1, start2, len2].
 * @param {string} signingTime - YYYYMMDDHHMMSS UTC digits; the same value as
 *   the PDF /M entry (derived from Date.toISOString()).
 * @returns {{signedAttrs: Attribute[], signedAttrsDigest: Buffer,
 *   byteRange: number[], pdfDigest: Buffer}}
 */
export function buildSignedAttributesDigest(pdfWithRevision, byteRange, signingTime) {
  console.log('[buildSignedAttributesDigest] ByteRange:', byteRange);

  // PDF digest over the two ByteRange segments (everything except the
  // /Contents hex placeholder).
  const part1 = pdfWithRevision.slice(byteRange[0], byteRange[1]);
  const part2 = pdfWithRevision.slice(byteRange[2], byteRange[2] + byteRange[3]);

  const pdfDigest = crypto.createHash('sha256').update(part1).update(part2).digest();
  console.log('[buildSignedAttributesDigest] PDF digest:', pdfDigest.toString('hex'));

  // contentType attribute (id-data).
  const attrContentType = new Attribute({
    type: OID_ATTR_CONTENT_TYPE,
    values: [new asn1js.ObjectIdentifier({ value: OID_ID_DATA })]
  });

  // Reuse the exact timestamp embedded in the PDF /M entry for consistency.
  // FIX: the digits come from toISOString() and are therefore UTC, so they
  // must be reassembled with Date.UTC — the previous local-time Date
  // constructor shifted the CMS signingTime by the host timezone offset
  // whenever the process did not run in UTC.
  const signingDate = new Date(Date.UTC(
    parseInt(signingTime.substring(0, 4), 10),     // year
    parseInt(signingTime.substring(4, 6), 10) - 1, // month (0-indexed)
    parseInt(signingTime.substring(6, 8), 10),     // day
    parseInt(signingTime.substring(8, 10), 10),    // hour
    parseInt(signingTime.substring(10, 12), 10),   // minute
    parseInt(signingTime.substring(12, 14), 10)    // second
  ));

  const attrSigningTime = new Attribute({
    type: OID_ATTR_SIGNING_TIME,
    values: [new asn1js.UTCTime({ valueDate: signingDate })]
  });

  // messageDigest attribute carrying the PDF digest computed above.
  const attrMessageDigest = new Attribute({
    type: OID_ATTR_MESSAGE_DIGEST,
    values: [new asn1js.OctetString({ valueHex: pdfDigest })]
  });

  // For digesting, the attributes are encoded as a plain SET OF — the
  // IMPLICIT [0] tag used inside SignerInfo is replaced by the SET tag when
  // computing the signature input (RFC 5652 §5.4).
  const signedAttrsForDigest = new asn1js.Set({
    value: [
      attrContentType.toSchema(),
      attrSigningTime.toSchema(),
      attrMessageDigest.toSchema()
    ]
  });

  // DER-encode and hash the SignedAttributes: this digest is the value that
  // actually gets signed by the external key.
  const signedAttrsDer = Buffer.from(signedAttrsForDigest.toBER());
  const signedAttrsDigest = crypto.createHash('sha256').update(signedAttrsDer).digest();
  console.log('[buildSignedAttributesDigest] SignedAttributes digest:', signedAttrsDigest.toString('hex'));

  return {
    signedAttrs: [attrContentType, attrSigningTime, attrMessageDigest], // pkijs Attribute objects
    signedAttrsDigest,
    byteRange,
    pdfDigest
  };
}
|
|
|
|
/**
 * Step 3: Build the detached CMS SignedData wrapping the externally-produced
 * (KMS) signature value.
 *
 * @param {Attribute[]} signedAttrs - pkijs Attribute objects from
 *   buildSignedAttributesDigest (contentType, signingTime, messageDigest).
 * @param {ArrayBuffer|Buffer} signatureBytes - Raw signature value computed
 *   over the SignedAttributes digest.
 * @param {Buffer|string} chainPem - PEM certificate chain, signer first.
 * @returns {Promise<Buffer>} DER-encoded ContentInfo (id-signedData).
 * @throws {Error} if the chain holds no certificate or one fails to parse.
 */
export async function buildCmsSignedData(signedAttrs, signatureBytes, chainPem) {
  console.log('[buildCmsSignedData] Parsing certificate chain...');
  console.log('[buildCmsSignedData] Chain PEM length:', chainPem.length, 'bytes');

  // Parse the certificate chain (the signer certificate is taken as the
  // first PEM block found).
  const chainStr = chainPem.toString('utf8');
  console.log('[buildCmsSignedData] Chain string preview:', chainStr.substring(0, 100));

  const certPems = chainStr.match(/-----BEGIN CERTIFICATE-----[\s\S]+?-----END CERTIFICATE-----/g);
  console.log('[buildCmsSignedData] Found', certPems ? certPems.length : 0, 'certificates');

  if (!certPems || certPems.length === 0) {
    throw new Error('Aucun certificat trouvé dans chain.pem');
  }

  const certificates = [];
  for (let i = 0; i < certPems.length; i++) {
    const pem = certPems[i];
    try {
      // Strip the PEM armor and decode base64 to DER.
      const b64 = pem.replace(/-----BEGIN CERTIFICATE-----/, '').replace(/-----END CERTIFICATE-----/, '').replace(/\s/g, '');
      const der = Buffer.from(b64, 'base64');
      console.log('[buildCmsSignedData] Cert', i, 'DER length:', der.length, 'bytes');

      // asn1js expects an ArrayBuffer, not a Node.js Buffer: slice out the
      // exact byte window backing this Buffer.
      const asn1Cert = asn1js.fromBER(der.buffer.slice(der.byteOffset, der.byteOffset + der.byteLength));
      if (asn1Cert.offset === -1) {
        console.error('[buildCmsSignedData] ASN.1 parsing failed for cert', i);
        throw new Error(`Erreur parsing certificat ${i}`);
      }

      const cert = new Certificate({ schema: asn1Cert.result });
      certificates.push(cert);
      console.log('[buildCmsSignedData] Cert', i, 'parsed successfully');
    } catch (err) {
      console.error('[buildCmsSignedData] Error parsing cert', i, ':', err.message);
      throw err;
    }
  }

  // First certificate of the chain = signer certificate.
  const signerCert = certificates[0];
  console.log('[buildCmsSignedData] Signer certificate parsed successfully');

  // SignerInfo, identified by issuer + serial number of the signer cert.
  const signerInfo = new SignerInfo({
    version: 1,
    sid: new IssuerAndSerialNumber({
      issuer: signerCert.issuer,
      serialNumber: signerCert.serialNumber
    }),
    // type 0 = signed attributes (pkijs convention for the [0] tag).
    signedAttrs: new SignedAndUnsignedAttributes({
      type: 0,
      attributes: signedAttrs // use the Attribute objects directly
    })
  });

  // Digest algorithm: SHA-256.
  signerInfo.digestAlgorithm = new AlgorithmIdentifier({
    algorithmId: '2.16.840.1.101.3.4.2.1' // SHA-256
  });

  // Signature algorithm: RSASSA-PSS with explicit parameters.
  // RSASSA-PSS-params ::= SEQUENCE {
  //   hashAlgorithm    [0]  SHA-256,
  //   maskGenAlgorithm [1]  MGF1 with SHA-256,
  //   saltLength       [2]  32 }
  // (idBlock tagClass 3 = context-specific tag.)
  signerInfo.signatureAlgorithm = new AlgorithmIdentifier({
    algorithmId: '1.2.840.113549.1.1.10', // RSASSA-PSS
    algorithmParams: new asn1js.Sequence({
      value: [
        // [0] hashAlgorithm
        new asn1js.Constructed({
          idBlock: { tagClass: 3, tagNumber: 0 },
          value: [
            new asn1js.Sequence({
              value: [
                new asn1js.ObjectIdentifier({ value: '2.16.840.1.101.3.4.2.1' }), // SHA-256
                new asn1js.Null()
              ]
            })
          ]
        }),
        // [1] maskGenAlgorithm
        new asn1js.Constructed({
          idBlock: { tagClass: 3, tagNumber: 1 },
          value: [
            new asn1js.Sequence({
              value: [
                new asn1js.ObjectIdentifier({ value: '1.2.840.113549.1.1.8' }), // MGF1
                new asn1js.Sequence({
                  value: [
                    new asn1js.ObjectIdentifier({ value: '2.16.840.1.101.3.4.2.1' }), // SHA-256
                    new asn1js.Null()
                  ]
                })
              ]
            })
          ]
        }),
        // [2] saltLength = 32 (SHA-256 output size)
        new asn1js.Constructed({
          idBlock: { tagClass: 3, tagNumber: 2 },
          value: [new asn1js.Integer({ value: 32 })]
        })
      ]
    })
  });

  // Raw signature value produced externally over the SignedAttributes digest.
  signerInfo.signature = new asn1js.OctetString({ valueHex: signatureBytes });

  // SignedData: detached content (no eContent), full certificate chain.
  const signedData = new SignedData({
    version: 1,
    digestAlgorithms: [new AlgorithmIdentifier({ algorithmId: '2.16.840.1.101.3.4.2.1' })],
    encapContentInfo: new EncapsulatedContentInfo({ eContentType: OID_ID_DATA }),
    certificates,
    signerInfos: [signerInfo]
  });

  // Wrap in ContentInfo with content type id-signedData.
  const contentInfo = new ContentInfo({
    contentType: '1.2.840.113549.1.7.2', // SignedData
    content: signedData.toSchema(true) // true: encode — per pkijs toSchema flag
  });

  const cmsDer = Buffer.from(contentInfo.toSchema().toBER());
  console.log('[buildCmsSignedData] CMS SignedData length:', cmsDer.length, 'bytes');

  return cmsDer;
}
|
|
|
|
/**
 * Step 4: finalize the PDF by injecting the CMS signature.
 *
 * The ByteRange embedded in the PDF is already correct: ONLY the hex content
 * of /Contents is replaced, zero-padded so that every byte offset (and thus
 * the ByteRange) stays valid.
 *
 * @param {Buffer} pdfWithRevision - PDF with the '<000...0>' placeholder.
 * @param {number[]} byteRange - [start1, len1, start2, len2] (for validation).
 * @param {string} cmsHex - Hex-encoded DER of the CMS SignedData.
 * @returns {Buffer} The fully signed PDF.
 * @throws {Error} if the placeholder is missing or the signature overflows it.
 */
export function finalizePdfWithCms(pdfWithRevision, byteRange, cmsHex) {
  console.log('[finalizePdfWithCms] Injecting CMS signature, length:', cmsHex.length);

  // Locate the /Contents placeholder (a run of '0' between angle brackets).
  // latin1 keeps a 1:1 char/byte mapping, so string indices are byte offsets.
  const asLatin1 = pdfWithRevision.toString('latin1');
  const placeholderMatch = asLatin1.match(/\/Contents <(0+)>/);
  if (!placeholderMatch) throw new Error('Placeholder /Contents non trouvé');

  const hexStart = placeholderMatch.index + '/Contents <'.length;
  const placeholderWidth = placeholderMatch[1].length;

  // The signature must fit inside the reserved placeholder.
  if (cmsHex.length > placeholderWidth) {
    throw new Error(`Signature CMS trop grande: ${cmsHex.length} > ${placeholderWidth}`);
  }

  // Zero-pad to the placeholder width (trailing zeros are harmless hex).
  const paddedHex = cmsHex.padEnd(placeholderWidth, '0');

  // Splice the hex signature in between the angle brackets, byte-safely.
  const finalPdf = Buffer.concat([
    pdfWithRevision.slice(0, hexStart),
    Buffer.from(paddedHex, 'latin1'),
    pdfWithRevision.slice(hexStart + placeholderWidth),
  ]);

  // Sanity check: recompute the ByteRange digest on the final bytes (logged
  // so it can be compared with the digest that was actually signed).
  const validationDigest = crypto
    .createHash('sha256')
    .update(finalPdf.slice(byteRange[0], byteRange[1]))
    .update(finalPdf.slice(byteRange[2], byteRange[2] + byteRange[3]))
    .digest();

  console.log('[finalizePdfWithCms] VALIDATION - PDF digest recalculé:', validationDigest.toString('hex'));

  return finalPdf;
}
|