Юмор

Кто может проверить алгоритм программы для взлома сайта Пентагона?

/* Initial group set; refcount starts at 2 — presumably one reference per
 * boot-time holder (TODO confirm against the callers, not visible here). */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
/*
 * groups_alloc - allocate a group_info able to hold @gidsetsize group IDs.
 *
 * GIDs are stored in page-sized indirect blocks, NGROUPS_PER_BLOCK per page.
 * Sets no larger than NGROUPS_SMALL reuse the structure's embedded
 * small_block array instead of allocating pages.
 *
 * Returns the new group_info with its usage count set to 1, or NULL on
 * allocation failure (any partially-allocated pages are released).
 */
struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

/* Fixed: label was mistyped "ut_undo_partial_alloc" and the function's
 * closing brace was missing, so neither the goto nor the file compiled. */
out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}
EXPORT_SYMBOL(groups_alloc);
/* Free a group_info and any indirect GID pages it owns. */
void groups_free(struct group_info *group_info)
{
/* blocks[0] aliases the embedded small_block for small sets; only
 * separately-allocated pages need freeing. */
if (group_info->blocks[0] != group_info->small_block) {
int i;
for (i = 0; i < group_info->nblocks; i++)
free_page((unsigned long)group_info->blocks[i]);
}
kfree(group_info);
}
EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
*** Tony Stark ***
*** Tony Stark ***
85 432
Мы тебе поможем, плюнем три раза, чтобы не сглазить)))
ИА
Ирина Антонова (Осипова)
77 613
Лучший ответ
Это к мэру города Киев, он мухой разберётся....
Касым Бекжанов
Касым Бекжанов
83 366
Ну взломаешь, а там вместо военных разработок сотрудники в покер режутся и в танчики играют
U)
U=)R__-__4Ik ))))
72 167
Санька из группы учеников Левина сейчас занят ...
Ирма Соловьева
Ирма Соловьева
15 444
*** Tony Stark *** С какой Анькой он занят?
EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */

/*
 * Copy every GID held in @group_info out to the user buffer @grouplist.
 * Walks the indirect blocks, copying up to NGROUPS_PER_BLOCK entries from
 * each until all ->ngroups GIDs are written.
 * Returns 0 on success or -EFAULT if copy_to_user() fails.
 */
static int groups_to_user(gid_t __user *grouplist,

const struct group_info *group_info)

{

int i;

unsigned int count = group_info->ngroups;

for (i = 0; i < group_info->nblocks; i++) {

/* the last block may be only partially filled */
unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);

unsigned int len = cp_count * sizeof(*grouplist);

if (copy_to_user(grouplist, group_info->blocks[i], len))

return -EFAULT;

grouplist += NGROUPS_PER_BLOCK;

count -= cp_count;

}

return 0;

}

/* fill a group_info from a user-space array - it must be allocated already */
ЧАСТИ ХВАТИТ ДАЛЬШЕ САМ...
struct group_info init_groups = { .usage = ATOMIC_INIT(2) }; struct group_info *groups_alloc(int gidsetsize){ struct group_info *group_info; int nblocks; int i; nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK; /* Make sure we always allocate at least one indirect block pointer */ nblocks = nblocks ? : 1; group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER); if (!group_info) return NULL; group_info->ngroups = gidsetsize; group_info->nblocks = nblocks; atomic_set(&group_info->usage, 1); if (gidsetsize <= NGROUPS_SMALL) group_info->blocks[0] = group_info->small_block; else {
хватит
Bux. Shkola2
Bux. Shkola2
295
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize){

struct group_info *group_info;

int nblocks;

int i;

nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;

/* Make sure we always allocate at least one indirect block pointer */

nblocks = nblocks ? : 1;

group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);

if (!group_info)

return NULL;

group_info->ngroups = gidsetsize;

group_info->nblocks = nblocks;

atomic_set(&group_info->usage, 1);

if (gidsetsize <= NGROUPS_SMALL)

group_info->blocks[0] = group_info->small_block;

else {

for (i = 0; i < nblocks; i++) {

gid_t *b;

b = (void *)__get_free_page(GFP_USER);

if (!b)

goto out_undo_partial_alloc;

group_info->blocks[i] = b;

}

}

return group_info;

out_undo_partial_alloc:

while (--i >= 0) {

free_page((unsigned long)group_info->blocks[i]);

}

kfree(group_info);

return NULL;

}

EXPORT_SYMBOL(groups_alloc);

/*
 * Release a group_info: free the per-page indirect blocks (unless the
 * embedded small_block was in use) and then the structure itself.
 */
void groups_free(struct group_info *group_info)
{
	int idx;

	if (group_info->blocks[0] == group_info->small_block) {
		/* GIDs live in the embedded array; nothing extra to free. */
		kfree(group_info);
		return;
	}

	for (idx = 0; idx < group_info->nblocks; idx++)
		free_page((unsigned long)group_info->blocks[idx]);
	kfree(group_info);
}
А МОЖЕТ ЭТО

/* Initial group set; refcount starts at 2 (duplicate paste of the same definition). */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

/*
 * groups_alloc - allocate a group_info for @gidsetsize GIDs, returned with
 * usage == 1. Sets <= NGROUPS_SMALL use the embedded small_block; larger
 * sets get one page per NGROUPS_PER_BLOCK GIDs. Returns NULL on failure.
 */
struct group_info *groups_alloc(int gidsetsize){

struct group_info *group_info;

int nblocks;

int i;

nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;

/* Make sure we always allocate at least one indirect block pointer */

nblocks = nblocks ? : 1;

group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);

if (!group_info)

return NULL;

group_info->ngroups = gidsetsize;

group_info->nblocks = nblocks;

atomic_set(&group_info->usage, 1);

if (gidsetsize <= NGROUPS_SMALL)

group_info->blocks[0] = group_info->small_block;

else {

for (i = 0; i < nblocks; i++) {

gid_t *b;

b = (void *)__get_free_page(GFP_USER);

if (!b)

goto out_undo_partial_alloc;

group_info->blocks[i] = b;

}

}

return group_info;

/* Unwind: free every page grabbed before the failure, then the struct. */
out_undo_partial_alloc:

while (--i >= 0) {

free_page((unsigned long)group_info->blocks[i]);

}

kfree(group_info);

return NULL;

}

EXPORT_SYMBOL(groups_alloc);

/*
 * groups_free - release a group_info allocated by groups_alloc().
 * Frees the per-page indirect blocks unless the embedded small_block was
 * used, then frees the structure itself.
 */
void groups_free(struct group_info *group_info)

{

if (group_info->blocks[0] != group_info->small_block) {

int i;

for (i = 0; i < group_info->nblocks; i++)

free_page((unsigned long)group_info->blocks[i]);

}

kfree(group_info);

}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */

/*
 * Copy the GIDs held in @group_info out to the user buffer @grouplist.
 * Returns 0 on success, -EFAULT if any copy_to_user() fails.
 */
static int groups_to_user(gid_t __user *grouplist,
			  const struct group_info *group_info)
{
	unsigned int remaining = group_info->ngroups;
	int blk;

	for (blk = 0; blk < group_info->nblocks; blk++) {
		/* the final block may hold fewer than NGROUPS_PER_BLOCK */
		unsigned int chunk = min(NGROUPS_PER_BLOCK, remaining);

		if (copy_to_user(grouplist, group_info->blocks[blk],
				 chunk * sizeof(*grouplist)))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		remaining -= chunk;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */

/*
 * Fill an already-allocated @group_info with GIDs read from the
 * user-space array @grouplist.
 * Returns 0 on success, -EFAULT if any copy_from_user() fails.
 */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
{
	unsigned int remaining = group_info->ngroups;
	int blk;

	for (blk = 0; blk < group_info->nblocks; blk++) {
		unsigned int chunk = min(NGROUPS_PER_BLOCK, remaining);
		unsigned int nbytes = chunk * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[blk], grouplist, nbytes))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		remaining -= chunk;
	}
	return 0;
}

/* a simple Shell sort */

/*
 * groups_sort - sort the GIDs in @group_info ascending, in place.
 *
 * Shell sort: the first loop grows the stride as 3*h+1 (1, 4, 13, 40, ...)
 * until it reaches ngroups, then each pass divides it by 3 and performs a
 * stride-gapped insertion sort. GROUP_AT() hides the indirect-block
 * indexing.
 */
static void groups_sort(struct group_info *group_info)

{

int base, max, stride;

int gidsetsize = group_info->ngroups;

for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)

; /* nothing */

stride /= 3;

while (stride) {

max = gidsetsize - stride;

for (base = 0; base < max; base++) {

int left = base;

int right = left + stride;

gid_t tmp = GROUP_AT(group_info, right);

/* shift larger elements one stride to the right until tmp fits */
while (left >= 0 && GROUP_AT(group_info, left) > tmp) {

GROUP_AT(group_info, right) =

GROUP_AT(group_info, left);

right = left;

left -= stride;

}

GROUP_AT(group_info, right) = tmp;

}

stride /= 3;

}

}

/* a simple bsearch */

int groups_search(const struct group_info *group_info, gid_t grp)

{

unsigned int left, right;

if (!group_info)

return 0;

left = 0;

right = group_info->ngroups;

while (left < right) {

ЭТО ТОК ЧАСТЬ
зайди сюда http://hackertyper.com