
Examining how the most significant bit is handled when binary values are converted to int or uint

2013-06-29 13:19
// How the most significant bit changes during conversion
#include <cstdio>

int main() {
    unsigned short A = 10; // 2 bytes, 16 bits: 0000 0000 0000 1010
    // Viewed as an unsigned short, ~A would be 1111 1111 1111 0101 = 2^16 - 11 = 65525

printf("0xFFF5 = %d\n", 0xFFF5);
printf("0xFFFFFFF5 = %d\n", 0xFFFFFFF5);
printf("0xFFFFFFF5 = %u\n", 0xFFFFFFF5);//Uint输出,4B
    int s = sizeof(unsigned short);

    printf("sizeof(unsigned short) = %d\n", s); // typically 2

    printf("~A = %u\n", ~A); // A is promoted to int before ~, giving 0xFFFFFFF5; %u prints 4294967285

    char c = 128; // 128 in binary is 1000 0000; on a signed char this stores -128
    printf("c=%c\n", c);   // printed as a single character (1 byte), not as the number 128
    //printf("c=%s\n", c); // error: %s expects a string, not a char

    int ss = sizeof(char);

    printf("sizeof(char) = %d\n", ss); // always 1

    printf("c=%d\n", c); // printed as int: -128
    // The single byte 1000 0000 is sign-extended to 4 bytes: 1111 1111 1111 1111 1111 1111 1000 0000
    // It is negative; invert and add 1 to get the magnitude: 0000 ... 0111 1111 + 1 = 1000 0000 = 128, so the value is -128

    printf("c=%u\n", c); // the same bits read as unsigned: c=4294967168

    printf("c=%u\n", 0xFFFFFF80); // identical bit pattern, identical output: 4294967168

    return 0;
}
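
The program above hinges on sign extension during integral promotion. As a minimal companion sketch (my own addition, not from the original post, and assuming a platform where plain char is signed and int is 32 bits), the snippet below contrasts a signed byte with an unsigned byte holding the same bit pattern 1000 0000, and shows that casting ~A back to unsigned short recovers the 16-bit value 65525:

// Companion sketch: sign extension vs. zero extension (assumes 32-bit int, signed plain char)
#include <cstdio>

int main() {
    unsigned short A = 10;
    // Casting ~A back to unsigned short truncates the promoted int to 16 bits: 0xFFF5 = 65525
    printf("(unsigned short)~A = %u\n", (unsigned short)~A);

    signed char sc = -128;  // bit pattern 1000 0000
    unsigned char uc = 128; // same bit pattern 1000 0000
    printf("sc as int = %d\n", sc); // sign-extended: -128
    printf("uc as int = %d\n", uc); // zero-extended: 128
    printf("sc as unsigned = %u\n", (unsigned int)sc); // 4294967168
    return 0;
}

The difference is entirely in the declared type: the unsigned char keeps the numeric value 128 because its high bit is not treated as a sign bit when it is widened.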