u32 rsize = le32_to_cpu(attr->res.data_size);
u32 tail = used - aoff - asize;
char *next = Add2Ptr(attr, asize);
- s64 dsize = QuadAlign(new_size) - QuadAlign(rsize);
+ s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
if (dsize < 0) {
memmove(next + dsize, next, tail);
#define PtrOffset(B, O) ((size_t)((size_t)(O) - (size_t)(B)))
#endif
-#define QuadAlign(n) (((n) + 7u) & (~7u))
-#define IsQuadAligned(n) (!((size_t)(n)&7u))
-#define Quad2Align(n) (((n) + 15u) & (~15u))
-#define IsQuad2Aligned(n) (!((size_t)(n)&15u))
-#define Quad4Align(n) (((n) + 31u) & (~31u))
-#define IsSizeTAligned(n) (!((size_t)(n) & (sizeof(size_t) - 1)))
-#define DwordAlign(n) (((n) + 3u) & (~3u))
-#define IsDwordAligned(n) (!((size_t)(n)&3u))
-#define WordAlign(n) (((n) + 1u) & (~1u))
-#define IsWordAligned(n) (!((size_t)(n)&1u))
-
#ifdef CONFIG_PRINTK
__printf(2, 3)
void ntfs_printk(const struct super_block *sb, const char *fmt, ...);
if (err < 0)
goto out;
- run_size = QuadAlign(err);
+ run_size = ALIGN(err, 8);
err = 0;
if (plen < svcn) {
if (err < 0)
goto out;
- run_size = QuadAlign(err);
+ run_size = ALIGN(err, 8);
err = 0;
if (plen < evcn + 1 - svcn) {
struct ATTRIB *attr;
bool is_ext =
(flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn;
- u32 name_size = QuadAlign(name_len * sizeof(short));
+ u32 name_size = ALIGN(name_len * sizeof(short), 8);
u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT;
u32 run_off = name_off + name_size;
u32 run_size, asize;
if (err < 0)
goto out;
- run_size = QuadAlign(err);
+ run_size = ALIGN(err, 8);
if (plen < len) {
err = -EINVAL;
struct ATTRIB **new_attr, struct mft_inode **mi)
{
int err;
- u32 name_size = QuadAlign(name_len * sizeof(short));
- u32 asize = SIZEOF_RESIDENT + name_size + QuadAlign(data_size);
+ u32 name_size = ALIGN(name_len * sizeof(short), 8);
+ u32 asize = SIZEOF_RESIDENT + name_size + ALIGN(data_size, 8);
struct ATTRIB *attr;
err = ni_insert_attr(ni, type, name, name_len, asize, SIZEOF_RESIDENT,
} else if (!attr->non_res) {
u32 data_size = le32_to_cpu(attr->res.data_size);
- dup->alloc_size = cpu_to_le64(QuadAlign(data_size));
+ dup->alloc_size = cpu_to_le64(ALIGN(data_size, 8));
dup->data_size = cpu_to_le64(data_size);
} else {
u64 new_valid = ni->i_valid;
return false;
ro = le16_to_cpu(rhdr->ra_off);
- if (!IsQuadAligned(ro) || ro > sys_page)
+ if (!IS_ALIGNED(ro, 8) || ro > sys_page)
return false;
end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
off = le16_to_cpu(ra->client_off);
- if (!IsQuadAligned(off) || ro + off > SECTOR_SIZE - sizeof(short))
+ if (!IS_ALIGNED(off, 8) || ro + off > SECTOR_SIZE - sizeof(short))
return false;
off += cl * sizeof(struct CLIENT_REC);
}
/* The log page data offset and record header length must be quad-aligned */
- if (!IsQuadAligned(le16_to_cpu(ra->data_off)) ||
- !IsQuadAligned(le16_to_cpu(ra->rec_hdr_len)))
+ if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
+ !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
return false;
return true;
log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO;
/* Compute the log page values */
- log->data_off = QuadAlign(
+ log->data_off = ALIGN(
offsetof(struct RECORD_PAGE_HDR, fixups) +
- sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1));
+ sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1), 8);
log->data_size = log->page_size - log->data_off;
log->record_header_len = sizeof(struct LFS_RECORD_HDR);
log->reserved = log->data_size - log->record_header_len;
/* Compute the restart page values. */
- log->ra_off = QuadAlign(
+ log->ra_off = ALIGN(
offsetof(struct RESTART_HDR, fixups) +
- sizeof(short) * ((log->sys_page_size >> SECTOR_SHIFT) + 1));
+ sizeof(short) * ((log->sys_page_size >> SECTOR_SHIFT) + 1), 8);
log->restart_size = log->sys_page_size - log->ra_off;
log->ra_size = struct_size(log->ra, clients, 1);
log->current_openlog_count = open_log_count;
vbo = hdr_off + log->data_off;
} else {
- vbo = QuadAlign(end);
+ vbo = ALIGN(end, 8);
}
/* Compute the lsn based on the file offset and the sequence count */
__le16 flags)
{
struct ATTRIB *attr;
- u32 name_size = QuadAlign(name_len * sizeof(short));
+ u32 name_size = ALIGN(name_len * sizeof(short), 8);
bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED);
u32 asize = name_size +
(is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT);
goto dirty_vol;
memmove(attr, attr2, dlen);
- rec->used = cpu_to_le32(QuadAlign(roff + dlen));
+ rec->used = cpu_to_le32(ALIGN(roff + dlen, 8));
mi->dirty = true;
break;
used = le32_to_cpu(rec->used);
if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT ||
- !IsQuadAligned(asize) ||
+ !IS_ALIGNED(asize, 8) ||
Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) ||
dlen > record_size - used) {
goto dirty_vol;
if (nsize > asize && nsize - asize > record_size - used)
goto dirty_vol;
- nsize = QuadAlign(nsize);
+ nsize = ALIGN(nsize, 8);
data_off = le16_to_cpu(attr->res.data_off);
if (nsize < asize) {
goto dirty_vol;
}
- nsize = QuadAlign(nsize);
+ nsize = ALIGN(nsize, 8);
memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
used - le16_to_cpu(lrh->record_off) - asize);
rh->sys_page_size = cpu_to_le32(log->page_size);
rh->page_size = cpu_to_le32(log->page_size);
- t16 = QuadAlign(offsetof(struct RESTART_HDR, fixups) +
- sizeof(short) * t16);
+ t16 = ALIGN(offsetof(struct RESTART_HDR, fixups) +
+ sizeof(short) * t16, 8);
rh->ra_off = cpu_to_le16(t16);
rh->minor_ver = cpu_to_le16(1); // 0x1A:
rh->major_ver = cpu_to_le16(1); // 0x1C:
sbi->security.next_id = SECURITY_ID_FIRST;
/* Always write new security at the end of bucket */
sbi->security.next_off =
- Quad2Align(sds_size - SecurityDescriptorsBlockSize);
+ ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
off = 0;
ne = NULL;
struct NTFS_DE_SII sii_e;
struct SECURITY_HDR *d_security;
u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
- u32 aligned_sec_size = Quad2Align(new_sec_size);
+ u32 aligned_sec_size = ALIGN(new_sec_size, 16);
struct SECURITY_KEY hash_key;
struct ntfs_fnd *fnd_sdh = NULL;
const struct INDEX_ROOT *root_sdh;
if (max_idx >= nslots) {
u16 *ptr;
- int new_slots = QuadAlign(2 * nslots);
+ int new_slots = ALIGN(2 * nslots, 8);
ptr = ntfs_malloc(sizeof(u16) * new_slots);
if (ptr)
index->rhdr.fix_num = cpu_to_le16(fn);
index->vbn = cpu_to_le64(vbn);
hdr = &index->ihdr;
- eo = QuadAlign(sizeof(struct INDEX_BUFFER) + fn * sizeof(short));
+ eo = ALIGN(sizeof(struct INDEX_BUFFER) + fn * sizeof(short), 8);
hdr->de_off = cpu_to_le32(eo);
e = Add2Ptr(hdr, eo);
fname->dup.ea_size = fname->dup.reparse = 0;
dsize = le16_to_cpu(new_de->key_size);
- asize = QuadAlign(SIZEOF_RESIDENT + dsize);
+ asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
attr->type = ATTR_NAME;
attr->size = cpu_to_le32(asize);
if (security_id == SECURITY_ID_INVALID) {
/* Insert security attribute */
- asize = SIZEOF_RESIDENT + QuadAlign(sd_size);
+ asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
attr->type = ATTR_SECURE;
attr->size = cpu_to_le32(asize);
attr->id = cpu_to_le16(aid++);
/* resident or non resident? */
- asize = QuadAlign(SIZEOF_RESIDENT + nsize);
+ asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
t16 = PtrOffset(rec, attr);
if (asize + t16 + 8 > sbi->record_size) {
goto out5;
}
- asize = SIZEOF_NONRESIDENT + QuadAlign(err);
+ asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
inode->i_size = nsize;
} else {
attr->res.data_off = SIZEOF_RESIDENT_LE;
fname->type = FILE_NAME_POSIX;
data_size = fname_full_size(fname);
- e->size = cpu_to_le16(QuadAlign(data_size) + sizeof(struct NTFS_DE));
+ e->size = cpu_to_le16(ALIGN(data_size, 8) + sizeof(struct NTFS_DE));
e->key_size = cpu_to_le16(data_size);
e->flags = 0;
e->res = 0;
return attr->non_res ? ((attr->flags &
(ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ?
le64_to_cpu(attr->nres.total_size) :
- le64_to_cpu(attr->nres.alloc_size)) :
- QuadAlign(le32_to_cpu(attr->res.data_size));
+ le64_to_cpu(attr->nres.alloc_size))
+ : ALIGN(le32_to_cpu(attr->res.data_size), 8);
}
static inline u64 attr_size(const struct ATTRIB *attr)
/*
 * le_size - bytes occupied by an ATTR_LIST_ENTRY with a name of
 * @name_len UTF-16 code units, quad (8 byte) aligned as NTFS requires.
 */
static inline u32 le_size(u8 name_len)
{
	/* Names are stored as 'short' (UTF-16LE) after the fixed header. */
	return ALIGN(offsetof(struct ATTR_LIST_ENTRY, name) +
		     name_len * sizeof(short), 8);
}
/* returns 0 if 'attr' has the same type and name */
sizeof(__le64)));
}
/*
 * Worst-case size of one directory index entry holding a file name:
 * NTFS_DE header + ATTR_FILE_NAME header + NTFS_NAME_LEN UTF-16 chars,
 * quad (8 byte) aligned.
 */
#define MAX_BYTES_PER_NAME_ENTRY \
	ALIGN(sizeof(struct NTFS_DE) + \
	      offsetof(struct ATTR_FILE_NAME, name) + \
	      NTFS_NAME_LEN * sizeof(short), 8)
struct INDEX_HDR {
__le32 de_off; // 0x00: The offset from the start of this structure
/* NTFS uses quad aligned bitmaps */
/*
 * bitmap_size - bytes needed to hold @bits bits.
 *
 * (bits + 7) >> 3 rounds up to whole bytes; the result is then quad
 * (8 byte) aligned because NTFS stores bitmaps quad aligned on disk.
 */
static inline size_t bitmap_size(size_t bits)
{
	return ALIGN((bits + 7) >> 3, 8);
}
#define _100ns2seconds 10000000
return NULL;
if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
- !IsDwordAligned(off)) {
+ !IS_ALIGNED(off, 4)) {
return NULL;
}
/* Can we use the first field (attr->type) */
if (off + 8 > used) {
- static_assert(QuadAlign(sizeof(enum ATTR_TYPE)) == 8);
+ static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
return NULL;
}
next = Add2Ptr(attr, asize);
if (bytes > 0) {
- dsize = QuadAlign(bytes);
+ dsize = ALIGN(bytes, 8);
if (used + dsize > total)
return false;
nsize = asize + dsize;
used += dsize;
rsize += dsize;
} else {
- dsize = QuadAlign(-bytes);
+ dsize = ALIGN(-bytes, 8);
if (dsize > asize)
return false;
nsize = asize - dsize;
return err;
}
- new_run_size = QuadAlign(err);
+ new_run_size = ALIGN(err, 8);
memmove(next + new_run_size - run_size, next + dsize, tail);
sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
sbi->max_bytes_per_attr =
- record_size - QuadAlign(MFTRECORD_FIXUP_OFFSET_1) -
- QuadAlign(((record_size >> SECTOR_SHIFT) * sizeof(short))) -
- QuadAlign(sizeof(enum ATTR_TYPE));
+ record_size - ALIGN(MFTRECORD_FIXUP_OFFSET_1, 8) -
+ ALIGN(((record_size >> SECTOR_SHIFT) * sizeof(short)), 8) -
+ ALIGN(sizeof(enum ATTR_TYPE), 8);
sbi->index_size = boot->index_size < 0
? 1u << (-boot->index_size)
rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET_1);
fn = (sbi->record_size >> SECTOR_SHIFT) + 1;
rec->rhdr.fix_num = cpu_to_le16(fn);
- ao = QuadAlign(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn);
+ ao = ALIGN(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn, 8);
rec->attr_off = cpu_to_le16(ao);
- rec->used = cpu_to_le32(ao + QuadAlign(sizeof(enum ATTR_TYPE)));
+ rec->used = cpu_to_le32(ao + ALIGN(sizeof(enum ATTR_TYPE), 8));
rec->total = cpu_to_le32(sbi->record_size);
((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;
/*
 * unpacked_ea_size - on-disk size of one unpacked extended attribute.
 *
 * Use the explicit ea->size when present; otherwise compute it from the
 * header plus name (ea->name_len + terminating NUL) plus value length,
 * dword (4 byte) aligned.
 */
static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
{
	return ea->size ? le32_to_cpu(ea->size)
			: ALIGN(struct_size(ea, name,
					    1 + ea->name_len +
						    le16_to_cpu(ea->elength)), 4);
}
static inline size_t packed_ea_size(const struct EA_FULL *ea)
goto out;
}
- add = DwordAlign(struct_size(ea_all, name, 1 + name_len + val_size));
+ add = ALIGN(struct_size(ea_all, name, 1 + name_len + val_size), 4);
err = ntfs_read_ea(ni, &ea_all, add, &info);
if (err)